Diffstat (limited to 'drivers/gpu')
1013 files changed, 67158 insertions, 28852 deletions
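Before the diff body, one pattern that recurs in the amdkfd hunks below is worth calling out: the read_user_wptr() macro gains pagefault_disable()/pagefault_enable() bracketing, so reading a user-space write pointer from inside hqd_load can never sleep on a fault (the memory is expected to be pinned and mapped when hqd_load runs; the bracketing resolves a four-lock circular dependency involving the DQM lock and mmap_sem). A minimal free-standing sketch of that pattern, assuming kernel context; the function wrapper is illustrative, since the real code is a macro in amdgpu_amdkfd.h:

/* Sketch of the pagefault-disabled user read behind read_user_wptr().
 * read_wptr_nofault() is a hypothetical wrapper for illustration only.
 * Assumes <linux/uaccess.h> and <linux/mmu_context.h>, as used by the
 * files below.
 */
static bool read_wptr_nofault(struct mm_struct *mm, uint32_t __user *wptr,
			      uint32_t *dst)
{
	bool valid = false;

	if (!mm || !wptr)
		return false;

	pagefault_disable();	/* a fault now fails fast instead of sleeping */
	if (mm == current->mm) {
		valid = !get_user(*dst, wptr);	/* same address space */
	} else if (!current->mm) {
		use_mm(mm);			/* borrow the process's mm */
		valid = !get_user(*dst, wptr);
		unuse_mm(mm);
	}
	pagefault_enable();

	return valid;
}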
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7e089c47a58e..7041323a7bff 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -54,6 +54,9 @@ config DRM_DEBUG_MM If in doubt, say "N". +config DRM_EXPORT_FOR_TESTS + bool + config DRM_DEBUG_SELFTEST tristate "kselftests for DRM" depends on DRM @@ -61,6 +64,7 @@ config DRM_DEBUG_SELFTEST select PRIME_NUMBERS select DRM_LIB_RANDOM select DRM_KMS_HELPER + select DRM_EXPORT_FOR_TESTS if m default n help This option provides kernel modules that can be used to run @@ -93,6 +97,21 @@ config DRM_KMS_FB_HELPER help FBDEV helpers for KMS drivers. +config DRM_DEBUG_DP_MST_TOPOLOGY_REFS + bool "Enable refcount backtrace history in the DP MST helpers" + depends on STACKTRACE_SUPPORT + select STACKDEPOT + depends on DRM_KMS_HELPER + depends on DEBUG_KERNEL + depends on EXPERT + help + Enables debug tracing for topology refs in DRM's DP MST helpers. A + history of each topology reference/dereference will be printed to the + kernel log once a port or branch device's topology refcount reaches 0. + + This has the potential to use a lot of memory and print some very + large kernel messages. If in doubt, say "N". + config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" depends on DRM @@ -165,6 +184,13 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. +config DRM_TTM_DMA_PAGE_POOL + bool + depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU) + default y + help + Choose this if you need the TTM dma page pool + config DRM_VRAM_HELPER tristate depends on DRM @@ -232,9 +258,9 @@ config DRM_AMDGPU tristate "AMD GPU" depends on DRM && PCI && MMU select FW_LOADER - select DRM_KMS_HELPER + select DRM_KMS_HELPER select DRM_SCHED - select DRM_TTM + select DRM_TTM select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index d968c2471412..13340f353ea8 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig @@ -1,12 +1,12 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT menu "ACP (Audio CoProcessor) Configuration" config DRM_AMD_ACP - bool "Enable AMD Audio CoProcessor IP support" - depends on DRM_AMDGPU - select MFD_CORE - select PM_GENERIC_DOMAINS if PM - help + bool "Enable AMD Audio CoProcessor IP support" + depends on DRM_AMDGPU + select MFD_CORE + select PM_GENERIC_DOMAINS if PM + help Choose this option to enable ACP IP support for AMD SOCs. This adds the ACP (Audio CoProcessor) IP driver and wires it up into the amdgpu driver. 
The ACP block provides the DMA diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 2e98c016cb47..9375e7f12420 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT config DRM_AMDGPU_SI bool "Enable amdgpu support for SI parts" depends on DRM_AMDGPU diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 00962a659009..7ae3b22c5628 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ - amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ - amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o + amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ + amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ + amdgpu_umc.o smu_v11_0_i2c.o amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o @@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \ - arct_reg_init.o navi12_reg_init.o + arct_reg_init.o navi12_reg_init.o mxgpu_nv.o # add DF block amdgpu-y += \ @@ -83,7 +84,7 @@ amdgpu-y += \ # add UMC block amdgpu-y += \ - umc_v6_1.o + umc_v6_1.o umc_v6_0.o # add IH block amdgpu-y += \ @@ -146,12 +147,16 @@ amdgpu-y += \ vce_v3_0.o \ vce_v4_0.o -# add VCN block +# add VCN and JPEG block amdgpu-y += \ amdgpu_vcn.o \ vcn_v1_0.o \ vcn_v2_0.o \ - vcn_v2_5.o + vcn_v2_5.o \ + amdgpu_jpeg.o \ + jpeg_v1_0.o \ + jpeg_v2_0.o \ + jpeg_v2_5.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bd37df5dd6d0..81a531b652aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -69,10 +69,12 @@ #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "amdgpu_vcn.h" +#include "amdgpu_jpeg.h" #include "amdgpu_mn.h" #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" #include "amdgpu_sdma.h" +#include "amdgpu_nbio.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_csa.h" @@ -106,6 +108,8 @@ struct amdgpu_mgpu_info uint32_t num_apu; }; +#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 + /* * Modules parameters. 
*/ @@ -122,6 +126,7 @@ extern int amdgpu_disp_priority; extern int amdgpu_hw_i2c; extern int amdgpu_pcie_gen2; extern int amdgpu_msi; +extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; extern int amdgpu_dpm; extern int amdgpu_fw_load_type; extern int amdgpu_aspm; @@ -135,6 +140,7 @@ extern int amdgpu_vm_fragment_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_vm_update_mode; +extern int amdgpu_exp_hw_support; extern int amdgpu_dc; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; @@ -146,11 +152,7 @@ extern uint amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern uint amdgpu_pp_feature_mask; -extern int amdgpu_ngg; -extern int amdgpu_prim_buf_per_se; -extern int amdgpu_pos_buf_per_se; -extern int amdgpu_cntl_sb_buf_per_se; -extern int amdgpu_param_buf_per_se; +extern uint amdgpu_force_long_training; extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; @@ -167,6 +169,12 @@ extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; extern int amdgpu_noretry; +extern int amdgpu_force_asic_type; +#ifdef CONFIG_HSA_AMD +extern int sched_policy; +#else +static const int sched_policy = KFD_SCHED_POLICY_HWS; +#endif #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; @@ -283,6 +291,9 @@ struct amdgpu_ip_block_version { const struct amd_ip_funcs *funcs; }; +#define HW_REV(_Major, _Minor, _Rev) \ + ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev))) + struct amdgpu_ip_block { struct amdgpu_ip_block_status status; const struct amdgpu_ip_block_version *version; @@ -425,7 +436,6 @@ struct amdgpu_fpriv { }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); -int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev); int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); @@ -477,7 +487,6 @@ struct amdgpu_cs_parser { uint64_t bytes_moved_vis_threshold; uint64_t bytes_moved; uint64_t bytes_moved_vis; - struct amdgpu_bo_list_entry *evictable; /* user fence */ struct amdgpu_bo_list_entry uf_entry; @@ -580,6 +589,8 @@ struct amdgpu_asic_funcs { bool (*need_reset_on_init)(struct amdgpu_device *adev); /* PCIe replay counter */ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); + /* device supports BACO */ + bool (*supports_baco)(struct amdgpu_device *adev); }; /* @@ -624,6 +635,11 @@ struct amdgpu_fw_vram_usage { u64 size; struct amdgpu_bo *reserved_bo; void *va; + + /* Offset on the top of VRAM, used as c2p write buffer. 
+ */ + u64 mem_train_fb_loc; + bool mem_train_support; }; /* @@ -644,71 +660,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); - -/* - * amdgpu nbio functions - * - */ -struct nbio_hdp_flush_reg { - u32 ref_and_mask_cp0; - u32 ref_and_mask_cp1; - u32 ref_and_mask_cp2; - u32 ref_and_mask_cp3; - u32 ref_and_mask_cp4; - u32 ref_and_mask_cp5; - u32 ref_and_mask_cp6; - u32 ref_and_mask_cp7; - u32 ref_and_mask_cp8; - u32 ref_and_mask_cp9; - u32 ref_and_mask_sdma0; - u32 ref_and_mask_sdma1; - u32 ref_and_mask_sdma2; - u32 ref_and_mask_sdma3; - u32 ref_and_mask_sdma4; - u32 ref_and_mask_sdma5; - u32 ref_and_mask_sdma6; - u32 ref_and_mask_sdma7; -}; - struct amdgpu_mmio_remap { u32 reg_offset; resource_size_t bus_addr; }; -struct amdgpu_nbio_funcs { - const struct nbio_hdp_flush_reg *hdp_flush_reg; - u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); - u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); - u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); - u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); - u32 (*get_rev_id)(struct amdgpu_device *adev); - void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); - void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); - u32 (*get_memsize)(struct amdgpu_device *adev); - void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index, int doorbell_size); - void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, - int doorbell_index, int instance); - void (*enable_doorbell_aperture)(struct amdgpu_device *adev, - bool enable); - void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, - bool enable); - void (*ih_doorbell_range)(struct amdgpu_device *adev, - bool use_doorbell, int doorbell_index); - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, - bool enable); - void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, - bool enable); - void (*get_clockgating_state)(struct amdgpu_device *adev, - u32 *flags); - void (*ih_control)(struct amdgpu_device *adev); - void (*init_registers)(struct amdgpu_device *adev); - void (*detect_hw_virt)(struct amdgpu_device *adev); - void (*remap_hdp_registers)(struct amdgpu_device *adev); -}; - struct amdgpu_df_funcs { void (*sw_init)(struct amdgpu_device *adev); + void (*sw_fini)(struct amdgpu_device *adev); void (*enable_broadcast_mode)(struct amdgpu_device *adev, bool enable); u32 (*get_fb_channel_number)(struct amdgpu_device *adev); @@ -748,6 +707,7 @@ enum amd_hw_ip_block_type { MP1_HWIP, UVD_HWIP, VCN_HWIP = UVD_HWIP, + JPEG_HWIP = VCN_HWIP, VCE_HWIP, DF_HWIP, DCE_HWIP, @@ -813,6 +773,7 @@ struct amdgpu_device { uint8_t *bios; uint32_t bios_size; struct amdgpu_bo *stolen_vga_memory; + struct amdgpu_bo *discovery_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -921,6 +882,12 @@ struct amdgpu_device { u32 cg_flags; u32 pg_flags; + /* nbio */ + struct amdgpu_nbio nbio; + + /* mmhub */ + struct amdgpu_mmhub mmhub; + /* gfx */ struct amdgpu_gfx gfx; @@ -936,6 +903,9 @@ struct amdgpu_device { /* vcn */ struct amdgpu_vcn vcn; + /* jpeg */ + struct amdgpu_jpeg jpeg; + /* firmwares */ struct amdgpu_firmware firmware; @@ -974,9 +944,7 @@ struct amdgpu_device { /* soc15 register offset based on ip, instance and 
segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; - const struct amdgpu_nbio_funcs *nbio_funcs; const struct amdgpu_df_funcs *df_funcs; - const struct amdgpu_mmhub_funcs *mmhub_funcs; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; @@ -1006,11 +974,11 @@ struct amdgpu_device { struct mutex lock_reset; struct amdgpu_doorbell_index doorbell_index; + struct mutex notifier_lock; + int asic_reset_res; struct work_struct xgmi_reset_work; - bool in_baco_reset; - long gfx_timeout; long sdma_timeout; long video_timeout; @@ -1018,6 +986,16 @@ struct amdgpu_device { uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; + + /* device pstate */ + int pstate; + /* enable runtime pm on the device */ + bool runpm; + + bool pm_sysfs_en; + bool ucode_sysfs_en; + + bool in_baco; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1032,6 +1010,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, void amdgpu_device_fini(struct amdgpu_device *adev); int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write); uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, @@ -1151,6 +1131,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) +#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev)) + #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); /* Common functions */ @@ -1167,9 +1149,12 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); -bool amdgpu_device_is_px(struct drm_device *dev); +bool amdgpu_device_supports_boco(struct drm_device *dev); +bool amdgpu_device_supports_baco(struct drm_device *dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_device_baco_enter(struct drm_device *dev); +int amdgpu_device_baco_exit(struct drm_device *dev); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) @@ -1207,8 +1192,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); int amdgpu_device_ip_suspend(struct amdgpu_device *adev); -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); +int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 07eb29885372..d3da9dde4ee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -63,45 
+63,10 @@ void amdgpu_amdkfd_fini(void) void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { - const struct kfd2kgd_calls *kfd2kgd; - - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_KAVERI: - case CHIP_HAWAII: - kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions(); - break; -#endif - case CHIP_CARRIZO: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); - break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); - break; - case CHIP_ARCTURUS: - kfd2kgd = amdgpu_amdkfd_arcturus_get_functions(); - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions(); - break; - default: - dev_info(adev->dev, "kfd not supported on this ASIC\n"); - return; - } + bool vf = amdgpu_sriov_vf(adev); adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->pdev, adev->asic_type, vf); if (adev->kfd.dev) amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; @@ -165,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->gfx.mec.queue_bitmap, KGD_MAX_QUEUES); - /* remove the KIQ bit as well */ - if (adev->gfx.kiq.ring.sched.ready) - clear_bit(amdgpu_gfx_mec_queue_to_bit(adev, - adev->gfx.kiq.ring.me - 1, - adev->gfx.kiq.ring.pipe, - adev->gfx.kiq.ring.queue), - gpu_resources.queue_bitmap); - /* According to linux/bitmap.h we shouldn't use bitmap_clear if * nbits is not compile time constant */ @@ -202,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->doorbell_index.last_non_cp; } - kgd2kfd_device_init(adev->kfd.dev, &gpu_resources); + kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources); } } @@ -709,38 +666,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) return 0; } -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void) -{ - return NULL; -} - struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g) + unsigned int asic_type, bool vf) { return NULL; } bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) { return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e519df3fd2b6..069d5d230810 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -57,7 +57,7 @@ struct kgd_mem { unsigned int mapped_to_gpu_memory; uint64_t va; - uint32_t mapping_flags; + uint32_t alloc_flags; atomic_t invalid; struct amdkfd_process_info *process_info; @@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); -struct kfd2kgd_calls 
*amdgpu_amdkfd_gfx_9_0_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void); - bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev); @@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); +/* Read user wptr from a specified user address space with page fault + * disabled. The memory must be pinned and mapped to the hardware when + * this is called in hqd_load functions, so it should never fault in + * the first place. This resolves a circular lock dependency involving + * four locks, including the DQM lock and mmap_sem. + */ #define read_user_wptr(mmptr, wptr, dst) \ ({ \ bool valid = false; \ if ((mmptr) && (wptr)) { \ + pagefault_disable(); \ if ((mmptr) == current->mm) { \ valid = !get_user((dst), (wptr)); \ } else if (current->mm == NULL) { \ @@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s valid = !get_user((dst), (wptr)); \ unuse_mm(mmptr); \ } \ + pagefault_enable(); \ } \ valid; \ }) @@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); int kgd2kfd_init(void); void kgd2kfd_exit(void); struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g); + unsigned int asic_type, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index c79aaebeeaf0..b6713e0ed1b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -19,10 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ - -#undef pr_fmt -#define pr_fmt(fmt) "kfd2kgd: " fmt - #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> @@ -69,11 +65,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[8] = { + uint32_t sdma_engine_reg_base[8] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, SOC15_REG_OFFSET(SDMA1, 0, @@ -91,111 +87,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, SOC15_REG_OFFSET(SDMA7, 0, mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL }; - uint32_t retval; - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } -static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, - u32 instance, u32 offset) -{ - switch (instance) { - case 0: - return (adev->reg_offset[SDMA0_HWIP][0][0] + offset); - case 1: - return (adev->reg_offset[SDMA1_HWIP][0][1] + offset); - case 2: - return (adev->reg_offset[SDMA2_HWIP][0][1] + offset); - case 3: - return (adev->reg_offset[SDMA3_HWIP][0][1] + offset); - case 4: - return (adev->reg_offset[SDMA4_HWIP][0][1] + offset); - case 5: - return (adev->reg_offset[SDMA5_HWIP][0][1] + offset); - case 6: - return (adev->reg_offset[SDMA6_HWIP][0][1] + offset); - case 7: - return (adev->reg_offset[SDMA7_HWIP][0][1] + offset); - default: - break; - } - return 0; -} - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev, - m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -205,7 +172,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) @@ -215,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -235,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); 
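(Aside: the arithmetic behind get_sdma_rlc_reg_offset(), which the renames in this file centralize, is easy to check by hand. Each SDMA engine has a block of RLC queue registers at a fixed base, and consecutive queues are spaced by the distance between the RLC1 and RLC0 copies of the same register. A compile-time toy model follows; the base values and stride are fabricated stand-ins for the SOC15_REG_OFFSET() terms, only the arithmetic is the point.)

/* Toy model of the computation in get_sdma_rlc_reg_offset(). The numbers
 * are made up; the real bases come from
 * SOC15_REG_OFFSET(SDMAn, 0, mmSDMAn_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL.
 */
#include <stdint.h>
#include <stdio.h>

#define RLC_QUEUE_STRIDE 0x80	/* assumed mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL */

static const uint32_t sdma_engine_reg_base[8] = {
	0x1260, 0x1860, 0x1e60, 0x2460,	/* fabricated per-engine bases */
	0x2a60, 0x3060, 0x3660, 0x3c60,
};

static uint32_t sdma_rlc_reg_offset(unsigned int engine_id,
				    unsigned int queue_id)
{
	return sdma_engine_reg_base[engine_id] + queue_id * RLC_QUEUE_STRIDE;
}

int main(void)
{
	/* SDMA2, RLC queue 3: engine-2 base plus three queue strides. */
	printf("0x%x\n", sdma_rlc_reg_offset(2, 3));
	return 0;
}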
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -255,40 +223,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -static const struct kfd2kgd_calls kfd2kgd = { +const struct kfd2kgd_calls arcturus_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, @@ -304,20 +274,11 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; - -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index d10f483f5e27..61cd707158e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -19,18 +19,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE 
SOFTWARE. */ -#undef pr_fmt -#define pr_fmt(fmt) "kfd2kgd: " fmt - -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" -#include "soc15_hw_ip.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" #include "navi10_enum.h" @@ -42,6 +33,7 @@ #include "v10_structs.h" #include "nv.h" #include "nvd.h" +#include "gfxhub_v2_0.h" enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -50,63 +42,6 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -#if 0 -static uint32_t get_watch_base_addr(struct amdgpu_device *adev); -#endif -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -139,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .get_tile_config = amdgpu_amdkfd_get_tile_config, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions() -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -250,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, ATC_VMID0_PASID_MAPPING__VALID_MASK; pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping); - /* - * need to do this twice, once for gfx and once for mmhub - * for ATC add 16 to VMID for mmhub, for IH different registers. - * ATC_VMID0..15 registers are separate from ATC_VMID16..31. 
- */ pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, @@ -306,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[2] = { + uint32_t sdma_engine_reg_base[2] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, /* On gfx10, mmSDMA1_xxx registers are defined NOT based @@ -322,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL }; - uint32_t retval; - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } @@ -488,72 +387,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = m->sdma_engine_id ? 
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -563,28 +457,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - pr_debug("sdma dump engine id %d queue_id %d\n", 
engine_id, queue_id); - pr_debug("sdma base addr %x\n", sdma_base_addr); - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -618,14 +510,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -746,59 +638,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = 
RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) - + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) @@ -830,6 +715,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid; + uint16_t queried_pasid; + bool ret; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; if (amdgpu_emu_mode == 0 && ring->sched.ready) @@ -838,13 +725,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid) - == pasid) { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); - break; - } + + ret = get_atc_vmid_pasid_mapping_info(kgd, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + amdgpu_gmc_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, 0); + break; } } @@ -914,7 +801,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint64_t base = page_table_base | AMDGPU_PTE_VALID; if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", @@ -922,18 +808,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, return; } - /* TODO: take advantage of per-process address space size. For - * now, all processes share the same address space size, like - * on GFX8 and older. 
- */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), - upper_32_bits(adev->vm_manager.max_pfn - 1)); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + /* SDMA is on gfxhub as well for Navi1* series */ + gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base); } + +const struct kfd2kgd_calls gfx_v10_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + get_atc_vmid_pasid_mapping_info, + .get_tile_config = amdgpu_amdkfd_get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .get_hive_id = amdgpu_amdkfd_get_hive_id, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 5f459bf5f622..6e6f0a99ec06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" @@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, - uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); - -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); - -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); - -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); -static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m) +static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m) { uint32_t retval; retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET + m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", + m->sdma_engine_id, m->sdma_queue_id, retval); return retval; } @@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; unsigned long end_jiffies; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t data; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - if (m->sdma_engine_id) { - data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); - } else { - data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); - } data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdma_rlc_rb_rptr); if (read_user_wptr(mm, wptr, data)) - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data); else - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdma_rlc_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR, m->sdma_rlc_virtual_addr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdma_rlc_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdma_rlc_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdma_rlc_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + 
mmSDMA0_RLC0_RB_RPTR); return 0; } @@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static void set_scratch_backing_va(struct kgd_dev *kgd, @@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd) return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); } + +const struct kfd2kgd_calls gfx_v7_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 6d2f61449606..bfbddedb2380 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -20,9 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
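The get_sdma_base_addr() -> get_sdma_rlc_reg_offset() rename in the GFX7 hunks above makes the semantics explicit: the value is a per-queue register offset (engine stride plus queue stride), not a base address. A minimal, compilable sketch of the arithmetic follows; the two stride constants are placeholders standing in for the real CIK register-header values.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical strides; the real SDMA1_REGISTER_OFFSET and
 * KFD_CIK_SDMA_QUEUE_OFFSET come from the CIK register headers. */
#define SDMA_ENGINE_STRIDE 0x200
#define SDMA_QUEUE_STRIDE  0x20

static uint32_t sdma_rlc_reg_offset(uint32_t engine_id, uint32_t queue_id)
{
	/* All RLC queue registers for (engine, queue) sit at one linear
	 * offset from the SDMA0 RLC0 register block. */
	return engine_id * SDMA_ENGINE_STRIDE + queue_id * SDMA_QUEUE_STRIDE;
}

int main(void)
{
	printf("RLC register offset for SDMA1 RLC2: 0x%x\n",
	       (unsigned int)sdma_rlc_reg_offset(1, 2));
	return 0;
}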
*/ -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" @@ -44,62 +41,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m) +static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m) { uint32_t retval; retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET + m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET; - pr_debug("sdma base address: 0x%x\n", retval); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", + m->sdma_engine_id, m->sdma_queue_id, retval); return retval; } @@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; unsigned long end_jiffies; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t data; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - if (m->sdma_engine_id) { - data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); - } else { - data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); - } data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 
data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); if (read_user_wptr(mm, wptr, data)) - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data); else - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR, m->sdmax_rlcx_virtual_addr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t 
vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} + value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int kgd_address_watch_disable(struct kgd_dev *kgd) @@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) RREG32(mmVM_INVALIDATE_RESPONSE); return 0; } + +const struct kfd2kgd_calls gfx_v8_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + get_atc_vmid_pasid_mapping_info, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e262f2ac07a3..6f1a4676ddde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -19,17 +19,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
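The GFX7 and GFX8 changes above fold the get_atc_vmid_pasid_mapping_valid()/..._pasid() pair into a single get_atc_vmid_pasid_mapping_info() call: one read of mmATC_VMID0_PASID_MAPPING yields the PASID through an out-parameter and the valid bit as the return value, instead of reading the register twice. A self-contained sketch of the pattern, with made-up mask values standing in for the ATC register fields:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical field masks standing in for
 * ATC_VMID0_PASID_MAPPING__PASID_MASK / __VALID_MASK. */
#define PASID_MASK 0x0000ffffu
#define VALID_MASK 0x80000000u

/* Stand-in for RREG32(mmATC_VMID0_PASID_MAPPING + vmid). */
static uint32_t read_mapping_reg(uint8_t vmid)
{
	return VALID_MASK | (0x100u + vmid); /* fake register value */
}

static bool get_mapping_info(uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value = read_mapping_reg(vmid); /* one read, not two */

	*p_pasid = value & PASID_MASK;
	return !!(value & VALID_MASK);
}

Callers can then test validity and compare the queried PASID in one step, which is exactly how the GFX9 invalidate_tlbs() hunk further down uses the new helper.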
*/ - -#define pr_fmt(fmt) "kfd2kgd: " fmt - -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "soc15_hw_ip.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" #include "vega10_enum.h" @@ -47,11 +40,8 @@ #include "soc15d.h" #include "mmhub_v1_0.h" #include "gfxhub_v1_0.h" -#include "gmc_v9_0.h" - +#include "mmhub_v9_4.h" -#define V9_PIPE_PER_MEC (4) -#define V9_QUEUES_PER_PIPE_MEC (8) enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -226,22 +216,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[2] = { + uint32_t sdma_engine_reg_base[2] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL }; - uint32_t retval; + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); - - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } @@ -388,71 +377,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = m->sdma_engine_id ? 
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -462,7 +447,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) @@ -472,15 +458,15 @@ static int kgd_hqd_sdma_dump(struct 
kgd_dev *kgd, return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -514,14 +500,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -584,59 +570,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); - return 
reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) - + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid, @@ -671,6 +650,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid, i; + uint16_t queried_pasid; + bool ret; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; uint32_t flush_type = 0; @@ -686,14 +667,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid) - == pasid) { - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - i, flush_type); - break; - } + + ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + for (i = 0; i < adev->num_vmhubs; i++) + amdgpu_gmc_flush_gpu_tlb(adev, vmid, + i, flush_type); + break; } } @@ -777,15 +758,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, return 0; } -void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid) -{ - /* No longer needed on GFXv9. The scratch base address is - * passed to the shader by the CP. It's the user mode driver's - * responsibility. - */ -} - void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { @@ -802,16 +774,14 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi * on GFX8 and older. 
*/ if (adev->asic_type == CHIP_ARCTURUS) { - /* Two MMHUBs */ - mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base); - mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base); + mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); } else mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } -static const struct kfd2kgd_calls kfd2kgd = { +const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, @@ -827,19 +797,11 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 26d8879bff9d..d9e9ad22b2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid); void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 01a58d3f8e17..b2487f4f271b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -19,9 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ - -#define pr_fmt(fmt) "kfd2kgd: " fmt - #include <linux/dma-buf.h> #include <linux/list.h> #include <linux/pagemap.h> @@ -33,11 +30,6 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" -/* Special VM and GART address alignment needed for VI pre-Fiji due to - * a HW bug. 
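With the hunks above, the per-ASIC kfd2kgd_calls tables are exported as const globals (gfx_v7_kfd2kgd, gfx_v8_kfd2kgd, gfx_v9_kfd2kgd) and the amdgpu_amdkfd_gfx_*_get_functions() accessors disappear, which also removes the cast that used to strip const from the table. The selector below is purely illustrative -- a guess at how a probe path might pick a table -- not code from this series:

struct kfd2kgd_calls; /* opaque here; defined in the kgd/kfd interface header */

extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;

/* Hypothetical: map a GFX IP major version to its function table. */
static const struct kfd2kgd_calls *pick_kfd2kgd(unsigned int gfx_major)
{
	switch (gfx_major) {
	case 7: return &gfx_v7_kfd2kgd;
	case 8: return &gfx_v8_kfd2kgd;
	default: return &gfx_v9_kfd2kgd;
	}
}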
- */ -#define VI_BO_SIZE_ALIGN (0x8000) - /* BO flag to indicate a KFD userptr BO */ #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) @@ -93,7 +85,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, } /* Set memory usage limits. Current, limits are - * System (TTM + userptr) memory - 3/4th System RAM + * System (TTM + userptr) memory - 15/16th System RAM * TTM memory - 3/8th System RAM */ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) @@ -106,18 +98,31 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) mem *= si.mem_unit; spin_lock_init(&kfd_mem_limit.mem_limit_lock); - kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2); + kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4); kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", (kfd_mem_limit.max_system_mem_limit >> 20), (kfd_mem_limit.max_ttm_mem_limit >> 20)); } +/* Estimate page table size needed to represent a given memory size + * + * With 4KB pages, we need one 8 byte PTE for each 4KB of memory + * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB + * of memory (factor 256K, >> 18). ROCm user mode tries to optimize + * for 2MB pages for TLB efficiency. However, small allocations and + * fragmented system memory still need some 4KB pages. We choose a + * compromise that should work in most cases without reserving too + * much memory for page tables unnecessarily (factor 16K, >> 14). + */ +#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14) + static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 domain, bool sg) { + uint64_t reserved_for_pt = + ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; - uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9; int ret = 0; acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, @@ -349,11 +354,44 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; - ret = amdgpu_vm_update_directories(adev, vm); + ret = amdgpu_vm_update_pdes(adev, vm, false); if (ret) return ret; - return amdgpu_sync_fence(NULL, sync, vm->last_update, false); + return amdgpu_sync_fence(sync, vm->last_update, false); +} + +static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) +{ + struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); + bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT; + uint32_t mapping_flags; + + mapping_flags = AMDGPU_VM_PAGE_READABLE; + if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE) + mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; + if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE) + mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) { + if (bo_adev == adev) + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; + else + mapping_flags |= AMDGPU_VM_MTYPE_UC; + } else { + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + break; + default: + mapping_flags |= coherent ? 
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + + return amdgpu_gem_va_map_flags(adev, mapping_flags); } /* add_bo_to_vm - Add a BO to a VM @@ -404,8 +442,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, } bo_va_entry->va = va; - bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev, - mem->mapping_flags); + bo_va_entry->pte_flags = get_pte_flags(adev, mem); bo_va_entry->kgd_dev = (void *)adev; list_add(&bo_va_entry->bo_list, list_bo_va); @@ -481,8 +518,7 @@ static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem, * * Returns 0 for success, negative errno for errors. */ -static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm, - uint64_t user_addr) +static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr) { struct amdkfd_process_info *process_info = mem->process_info; struct amdgpu_bo *bo = mem->bo; @@ -714,7 +750,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); - amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); + amdgpu_sync_fence(sync, bo_va->last_pt_update, false); return 0; } @@ -733,7 +769,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, return ret; } - return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); + return amdgpu_sync_fence(sync, bo_va->last_pt_update, false); } static int map_bo_to_gpuvm(struct amdgpu_device *adev, @@ -1079,10 +1115,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( uint64_t user_addr = 0; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; - int byte_align; u32 domain, alloc_domain; u64 alloc_flags; - uint32_t mapping_flags; int ret; /* @@ -1135,25 +1169,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if ((*mem)->aql_queue) size = size >> 1; - /* Workaround for TLB bug on older VI chips */ - byte_align = (adev->family == AMDGPU_FAMILY_VI && - adev->asic_type != CHIP_FIJI && - adev->asic_type != CHIP_POLARIS10 && - adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12 && - adev->asic_type != CHIP_VEGAM) ? - VI_BO_SIZE_ALIGN : 1; - - mapping_flags = AMDGPU_VM_PAGE_READABLE; - if (flags & ALLOC_MEM_FLAGS_WRITABLE) - mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; - if (flags & ALLOC_MEM_FLAGS_EXECUTABLE) - mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; - if (flags & ALLOC_MEM_FLAGS_COHERENT) - mapping_flags |= AMDGPU_VM_MTYPE_UC; - else - mapping_flags |= AMDGPU_VM_MTYPE_NC; - (*mem)->mapping_flags = mapping_flags; + (*mem)->alloc_flags = flags; amdgpu_sync_create(&(*mem)->sync); @@ -1168,7 +1184,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( memset(&bp, 0, sizeof(bp)); bp.size = size; - bp.byte_align = byte_align; + bp.byte_align = 1; bp.domain = alloc_domain; bp.flags = alloc_flags; bp.type = bo_type; @@ -1195,7 +1211,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); if (user_addr) { - ret = init_user_pages(*mem, current->mm, user_addr); + ret = init_user_pages(*mem, user_addr); if (ret) goto allocate_init_user_pages_failed; } @@ -1626,9 +1642,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); - (*mem)->mapping_flags = - AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | - AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; + (*mem)->alloc_flags = + ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) | + ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE; (*mem)->bo = amdgpu_bo_ref(bo); (*mem)->va = va; @@ -1657,10 +1674,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) { struct amdkfd_process_info *process_info = mem->process_info; - int invalid, evicted_bos; + int evicted_bos; int r = 0; - invalid = atomic_inc_return(&mem->invalid); + atomic_inc(&mem->invalid); evicted_bos = atomic_inc_return(&process_info->evicted_bos); if (evicted_bos == 1) { /* First eviction, stop the queues */ @@ -1739,6 +1756,10 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, return ret; } + /* + * FIXME: Cannot ignore the return code, must hold + * notifier_lock + */ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); /* Mark the BO as valid unless it was invalidated @@ -2027,7 +2048,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) pr_debug("Memory eviction: Validate BOs failed. Try again\n"); goto validate_map_fail; } - ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false); + ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false); if (ret) { pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); goto validate_map_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1c9d40f97a9b..9ba80d828876 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -338,17 +338,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { - uint8_t con_obj_id, con_obj_num, con_obj_type; - - con_obj_id = + uint8_t con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - con_obj_num = - (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) - >> ENUM_ID_SHIFT; - con_obj_type = - (le16_to_cpu(path->usConnObjectId) & - OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; /* Skip TV/CV support */ if ((le16_to_cpu(path->usDeviceTag) == @@ -373,15 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { - uint8_t grph_obj_id, grph_obj_num, grph_obj_type; - - grph_obj_id = - (le16_to_cpu(path->usGraphicObjIds[j]) & - OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; - grph_obj_num = - (le16_to_cpu(path->usGraphicObjIds[j]) & - ENUM_ID_MASK) >> ENUM_ID_SHIFT; - grph_obj_type = + uint8_t grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; @@ -2038,6 +2022,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) if (adev->is_atom_fw) { amdgpu_atomfirmware_scratch_regs_init(adev); amdgpu_atomfirmware_allocate_fb_scratch(adev); + ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev); + if (ret) { + DRM_ERROR("Failed to get mem train fb location.\n"); + return ret; + } } else { amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index daf687428cdb..ff4eb96bdfb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -27,6 +27,7 @@ #include "amdgpu_atomfirmware.h" #include "atom.h" #include "atombios.h" +#include "soc15_hw_ip.h" bool 
amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) { @@ -120,65 +121,14 @@ union vram_info { struct atom_vram_info_header_v2_3 v23; struct atom_vram_info_header_v2_4 v24; }; -/* - * Return vram width from integrated system info table, if available, - * or 0 if not. - */ -int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; - u16 data_offset, size; - union igp_info *igp_info; - union vram_info *vram_info; - u32 mem_channel_number; - u32 mem_channel_width; - u8 frev, crev; - - if (adev->flags & AMD_IS_APU) - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - integratedsysteminfo); - else - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - vram_info); - /* get any igp specific overrides */ - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, - &frev, &crev, &data_offset)) { - if (adev->flags & AMD_IS_APU) { - igp_info = (union igp_info *) - (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 11: - mem_channel_number = igp_info->v11.umachannelnumber; - /* channel width is 64 */ - return mem_channel_number * 64; - default: - return 0; - } - } else { - vram_info = (union vram_info *) - (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 3: - mem_channel_number = vram_info->v23.vram_module[0].channel_num; - mem_channel_width = vram_info->v23.vram_module[0].channel_width; - return mem_channel_number * (1 << mem_channel_width); - case 4: - mem_channel_number = vram_info->v24.vram_module[0].channel_num; - mem_channel_width = vram_info->v24.vram_module[0].channel_width; - return mem_channel_number * (1 << mem_channel_width); - default: - return 0; - } - } - } - - return 0; -} +union vram_module { + struct atom_vram_module_v9 v9; + struct atom_vram_module_v10 v10; +}; -static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, - int atom_mem_type) +static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, + int atom_mem_type) { int vram_type; @@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, return vram_type; } -/* - * Return vram type from either integrated system info table - * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not - */ -int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) + + +int +amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, + int *vram_width, int *vram_type, + int *vram_vendor) { struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; + int index, i = 0; u16 data_offset, size; union igp_info *igp_info; union vram_info *vram_info; + union vram_module *vram_module; u8 frev, crev; u8 mem_type; + u8 mem_vendor; + u32 mem_channel_number; + u32 mem_channel_width; + u32 module_id; if (adev->flags & AMD_IS_APU) index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, @@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) else index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { @@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset); switch (crev) { case 11: + mem_channel_number = igp_info->v11.umachannelnumber; + /* channel width is 64 */ + if (vram_width) + *vram_width = 
mem_channel_number * 64; mem_type = igp_info->v11.memorytype; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + break; default: - return 0; + return -EINVAL; } } else { vram_info = (union vram_info *) (mode_info->atom_context->bios + data_offset); + module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; switch (crev) { case 3: - mem_type = vram_info->v23.vram_module[0].memory_type; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (module_id > vram_info->v23.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v23.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v9.vram_module_size); + i++; + } + mem_type = vram_module->v9.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v9.channel_num; + mem_channel_width = vram_module->v9.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; case 4: - mem_type = vram_info->v24.vram_module[0].memory_type; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (module_id > vram_info->v24.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v24.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v10.vram_module_size); + i++; + } + mem_type = vram_module->v10.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v10.channel_num; + mem_channel_width = vram_module->v10.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; default: - return 0; + return -EINVAL; } } + } return 0; @@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) } return -EINVAL; } + +/* + * Check if VBIOS supports GDDR6 training data save/restore + */ +static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev) +{ + uint16_t data_offset; + int index; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, + NULL, NULL, &data_offset)) { + struct atom_firmware_info_v3_1 *firmware_info = + (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + + data_offset); + + DRM_DEBUG("atom firmware capability:0x%08x.\n", + le32_to_cpu(firmware_info->firmware_capability)); + + if (le32_to_cpu(firmware_info->firmware_capability) & + ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) + return true; + } + + return false; +} + +static int gddr6_mem_train_support(struct amdgpu_device *adev) +{ + int ret; + uint32_t major, minor, revision, hw_v; + + if (gddr6_mem_train_vbios_support(adev)) { + amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision); + hw_v = HW_REV(major, minor, revision); + /* + * treat 0 revision as a special case since register for MP0 and MMHUB is missing + * for some Navi10 A0, preventing driver from discovering the hwip information since + * none of the functions will be initialized, it should not cause any problems 
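The reworked amdgpu_atomfirmware_get_vram_info() above selects the active module via the module_id read from a BIOS scratch register, then walks the vram_module array entry by entry using each entry's own vram_module_size, because the entries are variable-length. A compilable sketch of that walk; the struct is a simplified stand-in for atom_vram_module_v9/v10, not the real layout:

#include <stdint.h>

/* Simplified stand-in: only the fields the walk itself needs. */
struct vram_module_hdr {
	uint16_t vram_module_size; /* total size of this entry, in bytes */
	uint8_t  memory_type;
	uint8_t  channel_num;
};

static const struct vram_module_hdr *
find_vram_module(const uint8_t *first, uint32_t module_id)
{
	const struct vram_module_hdr *m =
		(const struct vram_module_hdr *)first;
	uint32_t i;

	/* Variable-length entries: advance by each entry's declared
	 * size instead of indexing a fixed-stride array. */
	for (i = 0; i < module_id; i++)
		m = (const struct vram_module_hdr *)
			((const uint8_t *)m + m->vram_module_size);
	return m;
}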
+ */ + switch (hw_v) { + case HW_REV(11, 0, 0): + case HW_REV(11, 0, 5): + ret = 1; + break; + default: + DRM_ERROR("memory training vbios supports but psp hw(%08x)" + " doesn't support!\n", hw_v); + ret = -1; + break; + } + } else { + ret = 0; + hw_v = -1; + } + + + DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret); + return ret; +} + +int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) +{ + struct atom_context *ctx = adev->mode_info.atom_context; + unsigned char *bios = ctx->bios; + struct vram_reserve_block *reserved_block; + int index, block_number; + uint8_t frev, crev; + uint16_t data_offset, size; + uint32_t start_address_in_kb; + uint64_t offset; + int ret; + + adev->fw_vram_usage.mem_train_support = false; + + if (adev->asic_type != CHIP_NAVI10 && + adev->asic_type != CHIP_NAVI14) + return 0; + + if (amdgpu_sriov_vf(adev)) + return 0; + + ret = gddr6_mem_train_support(adev); + if (ret == -1) + return -EINVAL; + else if (ret == 0) + return 0; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + vram_usagebyfirmware); + ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, + &data_offset); + if (ret == 0) { + DRM_ERROR("parse data header failed.\n"); + return -EINVAL; + } + + DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x," + " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset); + /* only support 2.1+ */ + if (((uint16_t)frev << 8 | crev) < 0x0201) { + DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev); + return -EINVAL; + } + + reserved_block = (struct vram_reserve_block *) + (bios + data_offset + sizeof(struct atom_common_table_header)); + block_number = ((unsigned int)size - sizeof(struct atom_common_table_header)) + / sizeof(struct vram_reserve_block); + reserved_block += (block_number > 0) ? 
block_number-1 : 0; + DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n", + block_number, + le32_to_cpu(reserved_block->start_address_in_kb), + le16_to_cpu(reserved_block->used_by_firmware_in_kb), + le16_to_cpu(reserved_block->used_by_driver_in_kb)); + if (reserved_block->used_by_firmware_in_kb > 0) { + start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb); + offset = (uint64_t)start_address_in_kb * ONE_KiB; + if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) { + offset -= ONE_MiB; + } + + offset &= ~(ONE_MiB - 1); + adev->fw_vram_usage.mem_train_fb_loc = offset; + adev->fw_vram_usage.mem_train_support = true; + DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset); + ret = 0; + } else { + DRM_ERROR("used_by_firmware_in_kb is 0!\n"); + ret = -EINVAL; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 5ec6f92f353c..f871af5ea6f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -29,8 +29,9 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); -int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); -int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, + int *vram_width, int *vram_type, int *vram_vendor); +int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 3e35a8f2c5e5..a97fb759e2f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void) bool d3_supported = false; struct pci_dev *parent_pdev; - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - vga_count++; - - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); - - parent_pdev = pci_upstream_bridge(pdev); - d3_supported |= parent_pdev && parent_pdev->bridge_d3; - amdgpu_atpx_get_quirks(pdev); - } - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 649e68c4479b..d1495e1c9289 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, { unsigned long start_jiffies; unsigned long end_jiffies; - struct dma_fence *fence = NULL; + struct dma_fence *fence; int i, r; start_jiffies = jiffies; @@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, if (r) goto exit_do_move; r = dma_fence_wait(fence, false); + dma_fence_put(fence); if (r) goto exit_do_move; - dma_fence_put(fence); } end_jiffies = jiffies; r = 
jiffies_to_msecs(end_jiffies - start_jiffies); exit_do_move: - if (fence) - dma_fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 61e38e43ad1d..85b0515c0fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, return 0; error_free: - while (i--) { + for (i = 0; i < last_entry; ++i) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); + + amdgpu_bo_unref(&bo); + } + for (i = first_userptr; i < num_entries; ++i) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); amdgpu_bo_unref(&bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index d8729285f731..a62cbc8199de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1019,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) */ if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_connector *list_connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *list_amdgpu_connector; - list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(list_connector, + &iter) { if (connector == list_connector) continue; list_amdgpu_connector = to_amdgpu_connector(list_connector); @@ -1037,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) } } } + drm_connector_list_iter_end(&iter); } } } @@ -1494,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *amdgpu_dig_connector; struct drm_encoder *encoder; @@ -1508,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev, return; /* see if we already added it */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->connector_id == connector_id) { amdgpu_connector->devices |= supported_device; + drm_connector_list_iter_end(&iter); return; } if (amdgpu_connector->ddc_bus && i2c_bus->valid) { @@ -1526,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, } } } + drm_connector_list_iter_end(&iter); /* check if it's a dp bridge */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 253158fc378f..5b330f69194b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -35,6 +35,7 @@ #include "amdgpu_trace.h" #include "amdgpu_gmc.h" #include "amdgpu_gem.h" +#include "amdgpu_ras.h" static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, struct drm_amdgpu_cs_chunk_fence *data, @@ -449,75 +450,12 @@ retry: return r; } -/* Last resort, try to evict something from the current working set */ -static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, - struct amdgpu_bo *validated) -{ - uint32_t domain = validated->allowed_domains; - struct ttm_operation_ctx ctx = { true, false }; - int r; - 
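The amdgpu_connectors.c hunks above move from open-coded list_for_each_entry() over mode_config.connector_list to the drm_connector_list_iter API, and note how the "already added" early return gains its own drm_connector_list_iter_end() so the iterator is always torn down. The general shape of the pattern (a fragment, not standalone code):

struct drm_connector *connector;
struct drm_connector_list_iter iter;

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
	/* Inspect connector here. Any path that leaves the function
	 * from inside the loop must call
	 * drm_connector_list_iter_end(&iter) first, as the early
	 * return in amdgpu_connector_add() above does. */
}
drm_connector_list_iter_end(&iter);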
- if (!p->evictable) - return false; - - for (;&p->evictable->tv.head != &p->validated; - p->evictable = list_prev_entry(p->evictable, tv.head)) { - - struct amdgpu_bo_list_entry *candidate = p->evictable; - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - bool update_bytes_moved_vis; - uint32_t other; - - /* If we reached our current BO we can forget it */ - if (bo == validated) - break; - - /* We can't move pinned BOs here */ - if (bo->pin_count) - continue; - - other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - - /* Check if this BO is in one of the domains we need space for */ - if (!(other & domain)) - continue; - - /* Check if we can move this BO somewhere else */ - other = bo->allowed_domains & ~domain; - if (!other) - continue; - - /* Good we can try to move this BO somewhere else */ - update_bytes_moved_vis = - !amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo); - amdgpu_bo_placement_from_domain(bo, other); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - p->bytes_moved += ctx.bytes_moved; - if (update_bytes_moved_vis) - p->bytes_moved_vis += ctx.bytes_moved; - - if (unlikely(r)) - break; - - p->evictable = list_prev_entry(p->evictable, tv.head); - list_move(&candidate->tv.head, &p->validated); - - return true; - } - - return false; -} - static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) { struct amdgpu_cs_parser *p = param; int r; - do { - r = amdgpu_cs_bo_validate(p, bo); - } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo)); + r = amdgpu_cs_bo_validate(p, bo); if (r) return r; @@ -536,7 +474,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, list_for_each_entry(lobj, validated, tv.head) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); - bool binding_userptr = false; struct mm_struct *usermm; usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); @@ -553,20 +490,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, lobj->user_pages); - binding_userptr = true; } - if (p->evictable == lobj) - p->evictable = NULL; - r = amdgpu_cs_validate(p, bo); if (r) return r; - if (binding_userptr) { - kvfree(lobj->user_pages); - lobj->user_pages = NULL; - } + kvfree(lobj->user_pages); + lobj->user_pages = NULL; } return 0; } @@ -607,8 +538,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, e->tv.num_shared = 2; amdgpu_bo_list_get_list(p->bo_list, &p->validated); - if (p->bo_list->first_userptr != p->bo_list->num_entries) - p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX); INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); @@ -661,9 +590,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, &p->bytes_moved_vis_threshold); p->bytes_moved = 0; p->bytes_moved_vis = 0; - p->evictable = list_last_entry(&p->validated, - struct amdgpu_bo_list_entry, - tv.head); r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, amdgpu_cs_validate, p); @@ -869,29 +795,23 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, - fpriv->prt_va->last_pt_update, false); + r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update); if (r) return r; if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { - struct dma_fence *f; - bo_va = fpriv->csa_va; BUG_ON(!bo_va); r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; - f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, 
&p->job->sync, f, false); + r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); if (r) return r; } amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct dma_fence *f; - /* ignore duplicates */ bo = ttm_to_amdgpu_bo(e->tv.bo); if (!bo) @@ -905,8 +825,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - f = bo_va->last_pt_update; - r = amdgpu_sync_fence(adev, &p->job->sync, f, false); + r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); if (r) return r; } @@ -915,11 +834,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_update_directories(adev, vm); + r = amdgpu_vm_update_pdes(adev, vm, false); if (r) return r; - r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false); + r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update); if (r) return r; @@ -1061,7 +980,7 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, dma_fence_put(old); } - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + r = amdgpu_sync_fence(&p->job->sync, fence, true); dma_fence_put(fence); if (r) return r; @@ -1083,7 +1002,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, return r; } - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + r = amdgpu_sync_fence(&p->job->sync, fence, true); dma_fence_put(fence); return r; @@ -1291,11 +1210,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, if (r) goto error_unlock; - /* No memory allocation is allowed while holding the mn lock. - * p->mn is hold until amdgpu_cs_submit is finished and fence is added - * to BOs. + /* No memory allocation is allowed while holding the notifier lock. + * The lock is held until amdgpu_cs_submit is finished and fence is + * added to BOs. */ - amdgpu_mn_lock(p->mn); + mutex_lock(&p->adev->notifier_lock); /* If userptr are invalidated after amdgpu_cs_parser_bos(), return * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl. 
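Editor's note: the hunk above replaces the dedicated MMU-notifier lock wrapper with a plain mutex on the device. As a minimal sketch of the resulting submission contract, assuming two hypothetical helpers, userptr_invalidated() and push_job_and_fence(), which are not part of the patch:

```c
/*
 * Illustrative sketch only, not the driver code itself: the shape of
 * the CS fast path once amdgpu_mn_lock() becomes adev->notifier_lock.
 */
static int submit_locked_sketch(struct amdgpu_cs_parser *p)
{
	int r;

	mutex_lock(&p->adev->notifier_lock);	/* no allocations past this point */

	if (userptr_invalidated(p))		/* hypothetical check */
		r = -EAGAIN;			/* libdrm's drmIoctl() restarts the CS ioctl */
	else
		r = push_job_and_fence(p);	/* hypothetical: schedule job, fence the BOs */

	mutex_unlock(&p->adev->notifier_lock);
	return r;
}
```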
@@ -1338,13 +1257,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
-	amdgpu_mn_unlock(p->mn);
+	mutex_unlock(&p->adev->notifier_lock);
 	return 0;
 error_abort:
 	drm_sched_job_cleanup(&job->base);
-	amdgpu_mn_unlock(p->mn);
+	mutex_unlock(&p->adev->notifier_lock);
 error_unlock:
 	amdgpu_job_free(job);
@@ -1359,6 +1278,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	bool reserved_buffers = false;
 	int i, r;
+	if (amdgpu_ras_intr_triggered())
+		return -EHWPOISON;
+
 	if (!adev->accel_working)
 		return -EBUSY;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6614d8a6f4c8..1d2bbf10614e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -169,10 +169,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		}
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
-		for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-			if (adev->vcn.harvest_config & (1 << j))
+		for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
+			if (adev->jpeg.harvest_config & (1 << j))
 				continue;
-			rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
+			rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
 		}
 		break;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5652cc72ed3a..8e6726e0d035 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 	struct amdgpu_device *adev = dev->dev_private;
 	int r = 0, i;
+	/* Avoid accidentally unparking the sched thread during GPU reset */
+	mutex_lock(&adev->lock_reset);
+
 	/* hold on the scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 		kthread_unpark(ring->sched.thread);
 	}
+	mutex_unlock(&adev->lock_reset);
+
 	return 0;
 }
@@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 	if (!fences)
 		return -ENOMEM;
+	/* Avoid accidentally unparking the sched thread during GPU reset */
+	mutex_lock(&adev->lock_reset);
+
 	/* stop the scheduler */
 	kthread_park(ring->sched.thread);
@@ -1075,10 +1083,11 @@ failure:
 	/* restart the scheduler */
 	kthread_unpark(ring->sched.thread);
+	mutex_unlock(&adev->lock_reset);
+
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-	if (fences)
-		kfree(fences);
+	kfree(fences);
 	return 0;
 }
@@ -1090,8 +1099,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
 	adev->debugfs_preempt =
 		debugfs_create_file("amdgpu_preempt_ib", 0600,
-				    adev->ddev->primary->debugfs_root,
-				    (void *)adev, &fops_ib_preempt);
+				    adev->ddev->primary->debugfs_root, adev,
+				    &fops_ib_preempt);
 	if (!(adev->debugfs_preempt)) {
 		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
 		return -EIO;
@@ -1103,8 +1112,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
 {
-	if (adev->debugfs_preempt)
-		debugfs_remove(adev->debugfs_preempt);
+	debugfs_remove(adev->debugfs_preempt);
 }
 #else
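Editor's note: the debugfs hunks above repeat one pattern, taking adev->lock_reset around the window in which scheduler threads are parked so a concurrent GPU reset cannot unpark them mid-operation. A distilled, hedged sketch of that pattern; do_debug_work() is a hypothetical stand-in for the IB test or preemption logic:

```c
static int debugfs_op_sketch(struct amdgpu_device *adev)
{
	int i;

	/* hold the reset lock for the whole park/unpark window */
	mutex_lock(&adev->lock_reset);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	do_debug_work(adev);		/* hypothetical */

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	mutex_unlock(&adev->lock_reset);
	return 0;
}
```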
"amdgpu_ras.h" #include "amdgpu_pmu.h" +#include <linux/suspend.h> + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 -static const char *amdgpu_asic_name[] = { +const char *amdgpu_asic_name[] = { "TAHITI", "PITCAIRN", "VERDE", @@ -135,14 +137,14 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO, static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev); /** - * amdgpu_device_is_px - Is the device is a dGPU with HG/PX power control + * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control * * @dev: drm_device pointer * * Returns true if the device is a dGPU with HG/PX power control, * otherwise return false. */ -bool amdgpu_device_is_px(struct drm_device *dev) +bool amdgpu_device_supports_boco(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; @@ -151,6 +153,51 @@ bool amdgpu_device_is_px(struct drm_device *dev) return false; } +/** + * amdgpu_device_supports_baco - Does the device support BACO + * + * @dev: drm_device pointer + * + * Returns true if the device supporte BACO, + * otherwise return false. + */ +bool amdgpu_device_supports_baco(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + + return amdgpu_asic_supports_baco(adev); +} + +/** + * VRAM access helper functions. + * + * amdgpu_device_vram_access - read/write a buffer in vram + * + * @adev: amdgpu_device pointer + * @pos: offset of the buffer in vram + * @buf: virtual address of the buffer in system memory + * @size: read/write size, sizeof(@buf) must > @size + * @write: true - write to vram, otherwise - read from vram + */ +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write) +{ + uint64_t last; + unsigned long flags; + + last = size - 4; + for (last += pos; pos <= last; pos += 4) { + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); + if (write) + WREG32_NO_KIQ(mmMM_DATA, *buf++); + else + *buf++ = RREG32_NO_KIQ(mmMM_DATA); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } +} + /* * MMIO register access helper functions. 
 /*
  * MMIO register access helper functions.
  */
@@ -1023,12 +1070,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 	amdgpu_device_check_block_size(adev);
-	ret = amdgpu_device_get_job_timeout_settings(adev);
-	if (ret) {
-		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		return ret;
-	}
-
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 	return ret;
@@ -1046,8 +1087,9 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
+	int r;
-	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+	if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
 		return;
 	if (state == VGA_SWITCHEROO_ON) {
@@ -1055,7 +1097,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		amdgpu_device_resume(dev, true, true);
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		r = pci_enable_device(dev->pdev);
+		if (r)
+			DRM_WARN("pci_enable_device failed (%d)\n", r);
+		amdgpu_device_resume(dev, true);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
@@ -1063,7 +1110,11 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		pr_info("amdgpu: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		amdgpu_device_suspend(dev, true, true);
+		amdgpu_device_suspend(dev, true);
+		pci_save_state(dev->pdev);
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3cold);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1469,6 +1520,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+			goto parse_soc_bounding_box;
+
 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1496,14 +1550,18 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 			adev->gfx.config.num_packer_per_sc =
 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
 		}
-#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+
+parse_soc_bounding_box:
+		/*
+		 * soc bounding box info is not integrated in the discovery table,
+		 * we always need to parse it from gpu info firmware.
+		 */
 		if (hdr->version_minor == 2) {
 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
 		}
-#endif
 		break;
 	}
 	default:
@@ -1613,6 +1671,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	if (r)
 		return r;
+	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+		amdgpu_discovery_get_gfx_info(adev);
+
 	amdgpu_amdkfd_device_probe(adev);
 	if (amdgpu_sriov_vf(adev)) {
@@ -1622,7 +1683,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	}
 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1816,6 +1877,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		}
 	}
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_init_data_exchange(adev);
+
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
@@ -1839,16 +1903,26 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (r)
 		goto init_failed;
+	/*
+	 * retired pages will be loaded from eeprom and reserved here,
+	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
+	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
+	 * for I2C communication, which is only true at this point.
+	 * recovery_init may fail, but it can free all resources allocated by
+	 * itself and its failure should not stop the amdgpu init process.
+	 *
+	 * Note: theoretically, this should be called before all vram allocations
+	 * to protect retired pages from abuse
+	 */
+	amdgpu_ras_recovery_init(adev);
+
 	if (adev->gmc.xgmi.num_physical_nodes > 1)
 		amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
 init_failed:
-	if (amdgpu_sriov_vf(adev)) {
-		if (!r)
-			amdgpu_virt_init_data_exchange(adev);
+	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_release_full_gpu(adev, true);
-	}
 	return r;
 }
@@ -1887,6 +1961,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
+ * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run.
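Editor's note: a minimal sketch of the walk the kerneldoc above describes, with JPEG added by this series to the media IP types whose gating is managed elsewhere; status checks and error handling are elided:

```c
static void set_cg_state_sketch(struct amdgpu_device *adev,
				enum amd_clockgating_state state)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		const struct amdgpu_ip_block *blk = &adev->ip_blocks[i];

		/* media blocks handle their own gating */
		if (blk->version->type == AMD_IP_BLOCK_TYPE_UVD ||
		    blk->version->type == AMD_IP_BLOCK_TYPE_VCE ||
		    blk->version->type == AMD_IP_BLOCK_TYPE_VCN ||
		    blk->version->type == AMD_IP_BLOCK_TYPE_JPEG)
			continue;
		if (blk->version->funcs->set_clockgating_state)
			blk->version->funcs->set_clockgating_state((void *)adev,
								   state);
	}
}
```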
@@ -1911,6 +1986,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* enable clockgating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1941,6 +2017,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
 			/* enable powergating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
@@ -2006,6 +2083,7 @@ out:
 */
 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 {
+	struct amdgpu_gpu_instance *gpu_instance;
 	int i = 0, r;
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -2031,8 +2109,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 	if (r)
 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
-	/* set to low pstate by default */
-	amdgpu_xgmi_set_pstate(adev, 0);
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		mutex_lock(&mgpu_info.mutex);
+
+		/*
+		 * Reset device p-state to low as this was booted with high.
+		 *
+		 * This should be performed only after all devices from the same
+		 * hive get initialized.
+		 *
+		 * However, the number of devices in a hive is unknown in advance;
+		 * it is counted one by one as the devices initialize.
+		 *
+		 * So, we wait for all XGMI interlinked devices to be initialized.
+		 * This may bring some delays as those devices may come from
+		 * different hives. But that should be OK.
+		 */
+		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
+			for (i = 0; i < mgpu_info.num_gpu; i++) {
+				gpu_instance = &(mgpu_info.gpu_ins[i]);
+				if (gpu_instance->adev->flags & AMD_IS_APU)
+					continue;
+
+				r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+				if (r) {
+					DRM_ERROR("pstate setting failed (%d).\n", r);
+					break;
+				}
+			}
+		}
+
+		mutex_unlock(&mgpu_info.mutex);
+	}
 	return 0;
 }
@@ -2220,6 +2329,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		/* displays are handled in phase1 */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
 			continue;
+		/* PSP lost connection when err_event_athub occurs */
+		if (amdgpu_ras_intr_triggered() &&
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+			adev->ip_blocks[i].status.hw = false;
+			continue;
+		}
 		/* XXX handle errors */
 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
 		/* XXX handle errors */
@@ -2231,17 +2346,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		/* handle putting the SMC in the appropriate state */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
 			if (is_support_sw_smu(adev)) {
-				/* todo */
+				r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
 			} else if (adev->powerplay.pp_funcs &&
 					   adev->powerplay.pp_funcs->set_mp1_state) {
 				r = adev->powerplay.pp_funcs->set_mp1_state(
 					adev->powerplay.pp_handle,
 					adev->mp1_state);
-				if (r) {
-					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
-						  adev->mp1_state, r);
-					return r;
-				}
+			}
+			if (r) {
+				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+					  adev->mp1_state, r);
+				return r;
 			}
 		}
@@ -2511,20 +2626,19 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
 	case CHIP_RAVEN:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 	case CHIP_RENOIR:
 #endif
 		return amdgpu_dc != 0;
 #endif
 	default:
+		if (amdgpu_dc > 0)
+			DRM_INFO("Display Core has been requested via kernel parameter "
+				 "but isn't supported by ASIC, ignoring\n");
 		return false;
 	}
 }
@@ -2550,12 +2664,85 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 	struct amdgpu_device *adev =
 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
-	adev->asic_reset_res = amdgpu_asic_reset(adev);
+	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+		adev->asic_reset_res = (adev->in_baco == false) ?
+				amdgpu_device_baco_enter(adev->ddev) :
+				amdgpu_device_baco_exit(adev->ddev);
+	else
+		adev->asic_reset_res = amdgpu_asic_reset(adev);
+
 	if (adev->asic_reset_res)
 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
 			 adev->asic_reset_res, adev->ddev->unique);
 }
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+	char *input = amdgpu_lockup_timeout;
+	char *timeout_setting = NULL;
+	int index = 0;
+	long timeout;
+	int ret = 0;
+
+	/*
+	 * By default, the timeout for non-compute jobs is 10000,
+	 * and there is no timeout enforced on compute jobs.
+	 * In SR-IOV or passthrough mode, the timeout for compute
+	 * jobs is 10000 by default.
+ */ + adev->gfx_timeout = msecs_to_jiffies(10000); + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + else + adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; + + if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; + + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + } else { + timeout = msecs_to_jiffies(timeout); + } + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; + } + } + /* + * There is only one value specified and + * it should apply to all non-compute jobs. + */ + if (index == 1) { + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + } + } + + return ret; +} /** * amdgpu_device_init - initialize the driver @@ -2575,7 +2762,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags) { int r, i; - bool runtime = false; + bool boco = false; u32 max_MBps; adev->shutdown = false; @@ -2583,7 +2770,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ddev = ddev; adev->pdev = pdev; adev->flags = flags; - adev->asic_type = flags & AMD_ASIC_MASK; + + if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) + adev->asic_type = amdgpu_force_asic_type; + else + adev->asic_type = flags & AMD_ASIC_MASK; + adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; if (amdgpu_emu_mode == 1) adev->usec_timeout *= 2; @@ -2633,7 +2825,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); mutex_init(&adev->lock_reset); - mutex_init(&adev->virt.dpm_mutex); mutex_init(&adev->psp.mutex); r = amdgpu_device_check_arguments(adev); @@ -2726,6 +2917,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + r = amdgpu_device_get_job_timeout_settings(adev); + if (r) { + dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); + return r; + } + /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev); @@ -2734,12 +2931,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, * ignore it */ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); - if (amdgpu_device_is_px(ddev)) - runtime = true; - if (!pci_is_thunderbolt_attached(adev->pdev)) + if (amdgpu_device_supports_boco(ddev)) + boco = true; + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + !pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_register_client(adev->pdev, - &amdgpu_switcheroo_ops, runtime); - if (runtime) + &amdgpu_switcheroo_ops, boco); + if (boco) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); if (amdgpu_emu_mode == 1) { @@ -2826,8 +3026,6 @@ fence_driver_init: } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); - if (amdgpu_virt_request_full_gpu(adev, false)) - amdgpu_virt_release_full_gpu(adev, false); goto failed; } @@ -2845,16 +3043,19 @@ fence_driver_init: 
 	amdgpu_fbdev_init(adev);
-	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
-		amdgpu_pm_virt_sysfs_init(adev);
-
 	r = amdgpu_pm_sysfs_init(adev);
-	if (r)
+	if (r) {
+		adev->pm_sysfs_en = false;
 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+	} else
+		adev->pm_sysfs_en = true;
 	r = amdgpu_ucode_sysfs_init(adev);
-	if (r)
+	if (r) {
+		adev->ucode_sysfs_en = false;
 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+	} else
+		adev->ucode_sysfs_en = true;
 	r = amdgpu_debugfs_gem_init(adev);
 	if (r)
@@ -2885,6 +3086,13 @@ fence_driver_init:
 		DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
 	}
+	/*
+	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+	 * Otherwise the mgpu fan boost feature will be skipped because the
+	 * gpu instance count would be too low.
+	 */
+	amdgpu_register_gpu_instance(adev);
+
 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
 	 * explicit gating rather than handling it automatically.
 	 */
@@ -2916,7 +3124,7 @@ fence_driver_init:
 failed:
 	amdgpu_vf_error_trans_all(adev);
-	if (runtime)
+	if (boco)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	return r;
@@ -2935,7 +3143,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	int r;
 	DRM_INFO("amdgpu: finishing device.\n");
+	flush_delayed_work(&adev->delayed_init_work);
 	adev->shutdown = true;
+
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
@@ -2945,7 +3155,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 		drm_atomic_helper_shutdown(adev->ddev);
 	}
 	amdgpu_fence_driver_fini(adev);
-	amdgpu_pm_sysfs_fini(adev);
+	if (adev->pm_sysfs_en)
+		amdgpu_pm_sysfs_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_device_ip_fini(adev);
 	if (adev->firmware.gpu_info_fw) {
@@ -2953,7 +3164,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 		adev->firmware.gpu_info_fw = NULL;
 	}
 	adev->accel_working = false;
-	cancel_delayed_work_sync(&adev->delayed_init_work);
 	/* free i2c buses */
 	if (!amdgpu_device_has_dc_support(adev))
 		amdgpu_i2c_fini(adev);
@@ -2963,9 +3173,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	kfree(adev->bios);
 	adev->bios = NULL;
-	if (!pci_is_thunderbolt_attached(adev->pdev))
+	if (amdgpu_has_atpx() &&
+	    (amdgpu_is_atpx_hybrid() ||
+	     amdgpu_has_atpx_dgpu_power_cntl()) &&
+	    !pci_is_thunderbolt_attached(adev->pdev))
 		vga_switcheroo_unregister_client(adev->pdev);
-	if (adev->flags & AMD_IS_PX)
+	if (amdgpu_device_supports_boco(adev->ddev))
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	vga_client_register(adev->pdev, NULL, NULL, NULL);
 	if (adev->rio_mem)
@@ -2974,12 +3187,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	iounmap(adev->rmmio);
 	adev->rmmio = NULL;
 	amdgpu_device_doorbell_fini(adev);
-	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
-		amdgpu_pm_virt_sysfs_fini(adev);
-
 	amdgpu_debugfs_regs_cleanup(adev);
 	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
-	amdgpu_ucode_sysfs_fini(adev);
+	if (adev->ucode_sysfs_en)
+		amdgpu_ucode_sysfs_fini(adev);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
 	amdgpu_debugfs_preempt_cleanup(adev);
@@ -3002,11 +3214,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
*/ -int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { struct amdgpu_device *adev; struct drm_crtc *crtc; struct drm_connector *connector; + struct drm_connector_list_iter iter; int r; if (dev == NULL || dev->dev_private == NULL) { @@ -3029,9 +3242,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) if (!amdgpu_device_has_dc_support(adev)) { /* turn off display hw */ drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_OFF); + drm_connector_list_iter_end(&iter); drm_modeset_unlock_all(dev); /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -3082,17 +3297,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - pci_save_state(dev->pdev); - if (suspend) { - /* Shut down the device */ - pci_disable_device(dev->pdev); - pci_set_power_state(dev->pdev, PCI_D3hot); - } else { - r = amdgpu_asic_reset(adev); - if (r) - DRM_ERROR("amdgpu asic reset failed\n"); - } - return 0; } @@ -3107,9 +3311,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) * Returns 0 for success or an error on failure. * Called at driver resume. */ -int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool fbcon) { struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_device *adev = dev->dev_private; struct drm_crtc *crtc; int r = 0; @@ -3117,14 +3322,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (resume) { - pci_set_power_state(dev->pdev, PCI_D0); - pci_restore_state(dev->pdev); - r = pci_enable_device(dev->pdev); - if (r) - return r; - } - /* post card */ if (amdgpu_device_need_post(adev)) { r = amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -3180,9 +3377,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) /* turn on display hw */ drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_ON); + drm_connector_list_iter_end(&iter); + drm_modeset_unlock_all(dev); } amdgpu_fbdev_set_suspend(adev, 0); @@ -3466,6 +3667,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) goto error; + amdgpu_virt_init_data_exchange(adev); /* we need recover gart prior to run SMC/CP/SDMA resume */ amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); @@ -3483,7 +3685,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_amdkfd_post_reset(adev); error: - amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { amdgpu_inc_vram_lost(adev); @@ -3589,13 +3790,18 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, return r; } -static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, 
+static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
+				struct amdgpu_hive_info *hive,
 			       struct list_head *device_list_handle,
 			       bool *need_full_reset_arg)
 {
 	struct amdgpu_device *tmp_adev = NULL;
 	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
 	int r = 0;
+	int cpu = smp_processor_id();
+	bool use_baco =
+		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
+		true : false;
 	/*
 	 * ASIC reset has to be done on all HGMI hive nodes ASAP
@@ -3603,21 +3809,24 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 	 */
 	if (need_full_reset) {
 		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-			/* For XGMI run all resets in parallel to speed up the process */
+			/*
+			 * For XGMI run all resets in parallel to speed up the
+			 * process by scheduling the highpri wq on different
+			 * cpus. For XGMI with baco reset, all nodes must enter
+			 * baco within close proximity before anyone exits.
+			 */
 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+				if (!queue_work_on(cpu, system_highpri_wq,
+						   &tmp_adev->xgmi_reset_work))
 					r = -EALREADY;
+				cpu = cpumask_next(cpu, cpu_online_mask);
 			} else
 				r = amdgpu_asic_reset(tmp_adev);
-
-			if (r) {
-				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
-					 r, tmp_adev->ddev->unique);
+			if (r)
 				break;
-			}
 		}
-		/* For XGMI wait for all PSP resets to complete before proceed */
+		/* For XGMI wait for all work to complete before proceed */
 		if (!r) {
 			list_for_each_entry(tmp_adev, device_list_handle,
@@ -3626,16 +3835,57 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 					r = tmp_adev->asic_reset_res;
 					if (r)
 						break;
+					if (use_baco)
+						tmp_adev->in_baco = true;
+				}
+			}
+		}
+
+		/*
+		 * For XGMI with baco reset, the baco phase must be exited by
+		 * scheduling xgmi_reset_work one more time. PSP reset and sGPU
+		 * reset skip this phase. Do not assume that PSP reset and baco
+		 * reset coexist within an XGMI hive.
+		 */
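Editor's note: the comment above describes a two-pass fan-out. As a standalone, hedged sketch of that shape: queue one work item per hive member on a distinct online CPU, then wait for all of them and collect the result. Wrap-around of cpumask_next() past the last CPU is ignored here, as in the hunk itself:

```c
static int fan_out_reset_sketch(struct list_head *device_list_handle)
{
	struct amdgpu_device *tmp_adev;
	int cpu = smp_processor_id();
	int r = 0;

	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		if (!queue_work_on(cpu, system_highpri_wq,
				   &tmp_adev->xgmi_reset_work))
			return -EALREADY;	/* work already pending */
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
		flush_work(&tmp_adev->xgmi_reset_work);
		r = tmp_adev->asic_reset_res;
		if (r)
			break;
	}
	return r;
}
```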
+
+		if (!r && use_baco) {
+			cpu = smp_processor_id();
+			list_for_each_entry(tmp_adev, device_list_handle,
+					    gmc.xgmi.head) {
+				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+					if (!queue_work_on(cpu,
+						system_highpri_wq,
+						&tmp_adev->xgmi_reset_work))
+						r = -EALREADY;
+					if (r)
+						break;
+					cpu = cpumask_next(cpu, cpu_online_mask);
+				}
+			}
+		}
+
+		if (!r && use_baco) {
 			list_for_each_entry(tmp_adev, device_list_handle,
-				    gmc.xgmi.head) {
-			amdgpu_ras_reserve_bad_pages(tmp_adev);
+					    gmc.xgmi.head) {
+				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+					flush_work(&tmp_adev->xgmi_reset_work);
+					r = tmp_adev->asic_reset_res;
+					if (r)
+						break;
+					tmp_adev->in_baco = false;
+				}
 			}
 		}
+
+		if (r) {
+			DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+				  r, tmp_adev->ddev->unique);
+			goto end;
+		}
 	}
+	if (!r && amdgpu_ras_intr_triggered())
+		amdgpu_ras_intr_cleared();
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
@@ -3736,25 +3986,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
 		adev->mp1_state = PP_MP1_STATE_NONE;
 		break;
 	}
-	/* Block kfd: SRIOV would do it separately */
-	if (!amdgpu_sriov_vf(adev))
-		amdgpu_amdkfd_pre_reset(adev);
 	return true;
 }
 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
-	/*unlock kfd: SRIOV would do it separately */
-	if (!amdgpu_sriov_vf(adev))
-		amdgpu_amdkfd_post_reset(adev);
 	amdgpu_vf_error_trans_all(adev);
 	adev->mp1_state = PP_MP1_STATE_NONE;
 	adev->in_gpu_reset = 0;
 	mutex_unlock(&adev->lock_reset);
 }
-
 /**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
@@ -3774,11 +4017,28 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	struct amdgpu_hive_info *hive = NULL;
 	struct amdgpu_device *tmp_adev = NULL;
 	int i, r = 0;
+	bool in_ras_intr = amdgpu_ras_intr_triggered();
+	bool use_baco =
+		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
+		true : false;
+
+	/*
+	 * Flush RAM to disk so that after reboot
+	 * the user can read the log and see why the system rebooted.
+	 */
+	if (in_ras_intr && !use_baco && amdgpu_ras_get_context(adev)->reboot) {
+
+		DRM_WARN("Emergency reboot.");
+
+		ksys_sync_helper();
+		emergency_restart();
+	}
 	need_full_reset = job_signaled = false;
 	INIT_LIST_HEAD(&device_list);
-	dev_info(adev->dev, "GPU reset begin!\n");
+	dev_info(adev->dev, "GPU %s begin!\n",
+		 (in_ras_intr && !use_baco) ? "jobs stop" : "reset");
"jobs stop":"reset"); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -3805,9 +4065,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, return 0; } + /* Block kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_pre_reset(adev); + /* Build list of devices to reset */ if (adev->gmc.xgmi.num_physical_nodes > 1) { if (!hive) { + /*unlock kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_post_reset(adev); amdgpu_device_unlock_adev(adev); return -ENODEV; } @@ -3823,17 +4090,23 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } - /* - * Mark these ASICs to be reseted as untracked first - * And add them back after reset completed - */ - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) - amdgpu_unregister_gpu_instance(tmp_adev); - /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (tmp_adev != adev) { + amdgpu_device_lock_adev(tmp_adev, false); + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); + } + + /* + * Mark these ASICs to be reseted as untracked first + * And add them back after reset completed + */ + amdgpu_unregister_gpu_instance(tmp_adev); + /* disable ras on ALL IPs */ - if (amdgpu_device_ip_need_full_reset(tmp_adev)) + if (!(in_ras_intr && !use_baco) && + amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -3843,10 +4116,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, continue; drm_sched_stop(&ring->sched, job ? &job->base : NULL); + + if (in_ras_intr && !use_baco) + amdgpu_job_stop_all_jobs_on_sched(&ring->sched); } } + if (in_ras_intr && !use_baco) + goto skip_sched_resume; + /* * Must check guilty signal here since after this point all old * HW fences are force signaled. @@ -3857,9 +4136,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, dma_fence_is_signaled(job->base.s_fence->parent)) job_signaled = true; - if (!amdgpu_device_ip_need_full_reset(adev)) - device_list_handle = &device_list; - if (job_signaled) { dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; @@ -3881,7 +4157,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (tmp_adev == adev) continue; - amdgpu_device_lock_adev(tmp_adev, false); r = amdgpu_device_pre_asic_reset(tmp_adev, NULL, &need_full_reset); @@ -3900,7 +4175,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (r) adev->asic_reset_res = r; } else { - r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); + r = amdgpu_do_asic_reset(adev, hive, device_list_handle, + &need_full_reset); if (r && r == -EAGAIN) goto retry; } @@ -3909,6 +4185,7 @@ skip_hw_reset: /* Post ASIC reset for all devs .*/ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -3930,12 +4207,18 @@ skip_hw_reset: if (r) { /* bad news, how to tell it to userspace ? 
*/ - dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { - dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); } + } +skip_sched_resume: + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + /*unlock kfd: SRIOV would do it separately */ + if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_post_reset(tmp_adev); amdgpu_device_unlock_adev(tmp_adev); } @@ -4083,3 +4366,69 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) } } +int amdgpu_device_baco_enter(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (!amdgpu_device_supports_baco(adev->ddev)) + return -ENOTSUPP; + + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); + + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; + int ret; + + ret = smu_baco_enter(smu); + if (ret) + return ret; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + } + + return 0; +} + +int amdgpu_device_baco_exit(struct drm_device *dev) +{ + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (!amdgpu_device_supports_baco(adev->ddev)) + return -ENOTSUPP; + + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; + int ret; + + ret = smu_baco_exit(smu); + if (ret) + return ret; + + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + } + + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 1481899f86c1..f95092741c38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = { static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) { - uint32_t *p = (uint32_t *)binary; uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; - uint64_t pos = vram_size - BINARY_MAX_SIZE; - unsigned long flags; - - while (pos < vram_size) { - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); - *p++ = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - pos += 4; - } + uint64_t pos = vram_size - DISCOVERY_TMR_SIZE; + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false); return 0; } @@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) uint16_t checksum; 
 	int r;
-	adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
+	adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
 	if (!adev->discovery)
 		return -ENOMEM;
@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 }
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
-				    int *major, int *minor)
+				    int *major, int *minor, int *revision)
 {
 	struct binary_header *bhdr;
 	struct ip_discovery_header *ihdr;
@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
 				*major = ip->major;
 			if (minor)
 				*minor = ip->minor;
+			if (revision)
+				*revision = ip->revision;
 			return 0;
 		}
 		ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 85b8c4d4d576..ba78e15d9b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,11 +24,13 @@
 #ifndef __AMDGPU_DISCOVERY__
 #define __AMDGPU_DISCOVERY__
+#define DISCOVERY_TMR_SIZE  (64 << 10)
+
 int amdgpu_discovery_init(struct amdgpu_device *adev);
 void amdgpu_discovery_fini(struct amdgpu_device *adev);
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
-				    int *major, int *minor);
+				    int *major, int *minor, int *revision);
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
 #endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 1d4aaa9580f4..4e699071d144 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
 	struct amdgpu_connector *amdgpu_connector;
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
+	struct drm_connector_list_iter iter;
 	uint32_t devices;
 	int i = 0;
+	drm_connector_list_iter_begin(dev, &iter);
 	DRM_INFO("AMDGPU Display Connectors\n");
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector_iter(connector, &iter) {
 		amdgpu_connector = to_amdgpu_connector(connector);
 		DRM_INFO("Connector %d:\n", i);
 		DRM_INFO("  %s\n", connector->name);
@@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
 		}
 		i++;
 	}
+	drm_connector_list_iter_end(&iter);
 }
 /**
@@ -511,7 +514,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 	 * Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
*/ if (adev->asic_type >= CHIP_CARRIZO && - adev->asic_type <= CHIP_RAVEN && + adev->asic_type < CHIP_RAVEN && (adev->flags & AMD_IS_APU) && (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && amdgpu_bo_support_uswc(bo_flags) && @@ -687,7 +690,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_encoder *amdgpu_encoder; struct drm_connector *connector; - struct amdgpu_connector *amdgpu_connector; u32 src_v = 1, dst_v = 1; u32 src_h = 1, dst_h = 1; @@ -699,7 +701,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, continue; amdgpu_encoder = to_amdgpu_encoder(encoder); connector = amdgpu_get_connector_for_encoder(encoder); - amdgpu_connector = to_amdgpu_connector(connector); /* set scaling */ if (amdgpu_encoder->rmx_type == RMX_OFF) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 586db4fb46bd..a59cd47aa6c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -341,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = { /** * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation - * @dev: DRM device * @gobj: GEM BO * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR. * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 5803fcbae22f..9cc270efee7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) if (is_support_sw_smu(adev)) { ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, low ? &clk_freq : NULL, - !low ? &clk_freq : NULL); + !low ? &clk_freq : NULL, + true); if (ret) return 0; return clk_freq * 100; @@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) if (is_support_sw_smu(adev)) { ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, low ? &clk_freq : NULL, - !low ? &clk_freq : NULL); + !low ? &clk_freq : NULL, + true); if (ret) return 0; return clk_freq * 100; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 1c5c0fd76dbf..2cfb677272af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -298,12 +298,6 @@ enum amdgpu_pcie_gen { #define amdgpu_dpm_get_current_power_state(adev) \ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) -#define amdgpu_smu_get_current_power_state(adev) \ - ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu))) - -#define amdgpu_smu_set_power_state(adev) \ - ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu))) - #define amdgpu_dpm_get_pp_num_states(adev, data) \ ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 25adf2b847e8..3f6f14ce1511 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -43,6 +43,8 @@ #include "amdgpu_amdkfd.h" +#include "amdgpu_ras.h" + /* * KMS wrapper. * - 3.0.0 - initial driver @@ -82,13 +84,12 @@ * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS. 
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask + * - 3.36.0 - Allow reading more status registers on si/cik */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 35 +#define KMS_DRIVER_MINOR 36 #define KMS_DRIVER_PATCHLEVEL 0 -#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256 - int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; int amdgpu_gart_size = -1; /* auto */ @@ -101,7 +102,7 @@ int amdgpu_disp_priority = 0; int amdgpu_hw_i2c = 0; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; -char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH]; +char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; int amdgpu_dpm = -1; int amdgpu_fw_load_type = -1; int amdgpu_aspm = -1; @@ -128,11 +129,7 @@ char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; /* OverDrive(bit 14) disabled by default*/ uint amdgpu_pp_feature_mask = 0xffffbfff; -int amdgpu_ngg = 0; -int amdgpu_prim_buf_per_se = 0; -int amdgpu_pos_buf_per_se = 0; -int amdgpu_cntl_sb_buf_per_se = 0; -int amdgpu_param_buf_per_se = 0; +uint amdgpu_force_long_training = 0; int amdgpu_job_hang_limit = 0; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; @@ -146,12 +143,13 @@ int amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; int amdgpu_noretry = 1; +int amdgpu_force_asic_type = -1; struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; -uint amdgpu_ras_mask = 0xfffffffb; +uint amdgpu_ras_mask = 0xffffffff; /** * DOC: vramlimit (int) @@ -244,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444); * * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or * multiple values specified. 0 and negative values are invalidated. They will be adjusted - * to default timeout. - * - With one value specified, the setting will apply to all non-compute jobs. - * - With multiple values specified, the first one will be for GFX. The second one is for Compute. - * And the third and fourth ones are for SDMA and Video. + * to the default timeout. + * + * - With one value specified, the setting will apply to all non-compute jobs. + * - With multiple values specified, the first one will be for GFX. + * The second one is for Compute. The third and fourth ones are + * for SDMA and Video. + * * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) * jobs is 10000. And there is no timeout enforced on compute jobs. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs." +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; " + "for passthrough or sriov, 10000 for all jobs." " 0: keep default value. negative: infinity timeout), " - "format is [Non-Compute] or [GFX,Compute,SDMA,Video]"); + "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); /** @@ -392,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444); /** + * DOC: forcelongtraining (uint) + * Force long memory training in resume. 
+ * The default is zero, which indicates short training in resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force memory long training");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+
 /**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
@@ -449,42 +460,6 @@ MODULE_PARM_DESC(virtual_display,
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
 /**
- * DOC: ngg (int)
- * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
- */
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
-
-/**
- * DOC: prim_buf_per_se (int)
- * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
-
-/**
- * DOC: pos_buf_per_se (int)
- * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
-
-/**
- * DOC: cntl_sb_buf_per_se (int)
- * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
-
-/**
- * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
- * The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
-
-/**
 * DOC: job_hang_limit (int)
 * Set how much time allow a job hang and not drop it. The default is 0.
 */
@@ -616,6 +591,16 @@ MODULE_PARM_DESC(noretry,
	"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
+/**
+ * DOC: force_asic_type (int)
+ * A non-negative value used to specify the asic type for all supported GPUs.
+ */
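Editor's note, as an illustration only and not part of the patch: both new options are set like any other amdgpu module parameter, e.g. amdgpu.forcelongtraining=1 or amdgpu.force_asic_type=<n> on the kernel command line, where <n> is a value of the amd_asic_type enum below CHIP_LAST.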
+MODULE_PARM_DESC(force_asic_type,
+	"A non-negative value used to specify the asic type for all supported GPUs");
+module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
+
 #ifdef CONFIG_HSA_AMD
 /**
 * DOC: sched_policy (int)
@@ -1013,15 +998,17 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	/* Navi14 */
-	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
 	/* Navi12 */
 	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
 	{0, 0, 0}
 };
@@ -1127,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
+#ifdef MODULE
+	if (THIS_MODULE->state != MODULE_STATE_GOING)
+#endif
+		DRM_ERROR("Hotplug removal is not supported\n");
 	drm_dev_unplug(dev);
 	drm_dev_put(dev);
 	pci_disable_device(pdev);
@@ -1140,6 +1130,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct amdgpu_device *adev = dev->dev_private;
+	if (amdgpu_ras_intr_triggered())
+		return;
+
 	/* if we are running in a VM, make sure the device
 	 * is torn down properly on reboot/shutdown.
* unfortunately we can't detect certain @@ -1154,7 +1147,7 @@ static int amdgpu_pmops_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_suspend(drm_dev, true, true); + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_resume(struct device *dev) @@ -1162,66 +1155,82 @@ static int amdgpu_pmops_resume(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); /* GPU comes up enabled by the bios on resume */ - if (amdgpu_device_is_px(drm_dev)) { + if (amdgpu_device_supports_boco(drm_dev) || + amdgpu_device_supports_baco(drm_dev)) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } - return amdgpu_device_resume(drm_dev, true, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_freeze(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_dev->dev_private; + int r; - return amdgpu_device_suspend(drm_dev, false, true); + r = amdgpu_device_suspend(drm_dev, true); + if (r) + return r; + return amdgpu_asic_reset(adev); } static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_resume(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_suspend(drm_dev, true, true); + return amdgpu_device_suspend(drm_dev, true); } static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - return amdgpu_device_resume(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; int ret; - if (!amdgpu_device_is_px(drm_dev)) { + if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + if (amdgpu_device_supports_boco(drm_dev)) + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); - ret = amdgpu_device_suspend(drm_dev, false, false); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_ignore_hotplug(pdev); - if (amdgpu_is_atpx_hybrid()) - pci_set_power_state(pdev, PCI_D3cold); - else if (!amdgpu_has_atpx_dgpu_power_cntl()) - pci_set_power_state(pdev, PCI_D3hot); - drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + ret = amdgpu_device_suspend(drm_dev, false); + if (amdgpu_device_supports_boco(drm_dev)) { + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. 
+ */ + if (amdgpu_is_atpx_hybrid()) { + pci_ignore_hotplug(pdev); + } else { + pci_save_state(pdev); + pci_disable_device(pdev); + pci_ignore_hotplug(pdev); + pci_set_power_state(pdev, PCI_D3cold); + } + drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; + } else if (amdgpu_device_supports_baco(drm_dev)) { + amdgpu_device_baco_enter(drm_dev); + } return 0; } @@ -1230,34 +1239,45 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_dev->dev_private; int ret; - if (!amdgpu_device_is_px(drm_dev)) + if (!adev->runpm) return -EINVAL; - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + if (amdgpu_device_supports_boco(drm_dev)) { + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (amdgpu_is_atpx_hybrid() || - !amdgpu_has_atpx_dgpu_power_cntl()) - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - pci_set_master(pdev); - - ret = amdgpu_device_resume(drm_dev, false, false); + /* Only need to handle PCI state in the driver for ATPX + * PCI core handles it for _PR3. + */ + if (amdgpu_is_atpx_hybrid()) { + pci_set_master(pdev); + } else { + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + } + } else if (amdgpu_device_supports_baco(drm_dev)) { + amdgpu_device_baco_exit(drm_dev); + } + ret = amdgpu_device_resume(drm_dev, false); drm_kms_helper_poll_enable(drm_dev); - drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; + if (amdgpu_device_supports_boco(drm_dev)) + drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; return 0; } static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_dev->dev_private; struct drm_crtc *crtc; - if (!amdgpu_device_is_px(drm_dev)) { + if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } @@ -1347,66 +1367,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) -{ - char *input = amdgpu_lockup_timeout; - char *timeout_setting = NULL; - int index = 0; - long timeout; - int ret = 0; - - /* - * By default timeout for non compute jobs is 10000. - * And there is no timeout enforced on compute jobs. - */ - adev->gfx_timeout = msecs_to_jiffies(10000); - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; - - if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { - while ((timeout_setting = strsep(&input, ",")) && - strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { - ret = kstrtol(timeout_setting, 0, &timeout); - if (ret) - return ret; - - if (timeout == 0) { - index++; - continue; - } else if (timeout < 0) { - timeout = MAX_SCHEDULE_TIMEOUT; - } else { - timeout = msecs_to_jiffies(timeout); - } - - switch (index++) { - case 0: - adev->gfx_timeout = timeout; - break; - case 1: - adev->compute_timeout = timeout; - break; - case 2: - adev->sdma_timeout = timeout; - break; - case 3: - adev->video_timeout = timeout; - break; - default: - break; - } - } - /* - * There is only one value specified and - * it should apply to all non-compute jobs. 
- */ - if (index == 1) - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - } - - return ret; -} - static bool amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 571a6dfb473e..61fcf247a638 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; + drm_connector_list_iter_begin(dev, &iter); /* walk the list and link encoders to connectors */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev) } } } + drm_connector_list_iter_end(&iter); } void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) @@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices; @@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) amdgpu_connector->devices, encoder->encoder_type); } } + drm_connector_list_iter_end(&iter); } struct drm_connector * @@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector; + struct drm_connector *connector, *found = NULL; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_encoder->active_device & amdgpu_connector->devices) - return connector; + if (amdgpu_encoder->active_device & amdgpu_connector->devices) { + found = connector; + break; + } } - return NULL; + drm_connector_list_iter_end(&iter); + return found; } struct drm_connector * @@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector; + struct drm_connector *connector, *found = NULL; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + 
drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_encoder->devices & amdgpu_connector->devices) - return connector; + if (amdgpu_encoder->devices & amdgpu_connector->devices) { + found = connector; + break; + } } - return NULL; + drm_connector_list_iter_end(&iter); + return found; } struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 23085b352cf2..377fe20bce23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, timeout = adev->gfx_timeout; break; case AMDGPU_RING_TYPE_COMPUTE: - /* - * For non-sriov case, no timeout enforce - * on compute ring by default. Unless user - * specifies a timeout for compute ring. - * - * For sriov case, always use the timeout - * as gfx ring - */ - if (!amdgpu_sriov_vf(ring->adev)) - timeout = adev->compute_timeout; - else - timeout = adev->gfx_timeout; + timeout = adev->compute_timeout; break; case AMDGPU_RING_TYPE_SDMA: timeout = adev->sdma_timeout; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 19705e399905..e01e681d2a60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -302,6 +302,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, * @pages: number of pages to bind * @pagelist: pages to bind * @dma_addr: DMA addresses of pages + * @flags: page table entry flags * * Binds the requested pages to the gart page table * (all asics). diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index c40642a91bd4..4277125a79ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, goto error; } - r = amdgpu_vm_update_directories(adev, vm); + r = amdgpu_vm_update_pdes(adev, vm, false); error: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } +/** + * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags + * + * @adev: amdgpu_device pointer + * @flags: GEM UAPI flags + * + * Returns the GEM UAPI flags mapped into hardware for the ASIC. 
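The amdgpu_encoders.c hunks above all migrate from open-coded walks of mode_config.connector_list to the DRM connector-list iterator, which keeps the walk consistent against concurrent connector hotplug. The resulting idiom, in isolation (connector_matches() is a placeholder for whatever predicate the caller applies):

```c
struct drm_connector *connector, *found = NULL;
struct drm_connector_list_iter iter;

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
	if (connector_matches(connector)) {
		found = connector;	/* capture before ending the walk */
		break;
	}
}
drm_connector_list_iter_end(&iter);	/* must always pair with _begin() */
```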
+ */ +uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags) +{ + uint64_t pte_flag = 0; + + if (flags & AMDGPU_VM_PAGE_EXECUTABLE) + pte_flag |= AMDGPU_PTE_EXECUTABLE; + if (flags & AMDGPU_VM_PAGE_READABLE) + pte_flag |= AMDGPU_PTE_READABLE; + if (flags & AMDGPU_VM_PAGE_WRITEABLE) + pte_flag |= AMDGPU_PTE_WRITEABLE; + if (flags & AMDGPU_VM_PAGE_PRT) + pte_flag |= AMDGPU_PTE_PRT; + + if (adev->gmc.gmc_funcs->map_mtype) + pte_flag |= amdgpu_gmc_map_mtype(adev, + flags & AMDGPU_VM_MTYPE_MASK); + + return pte_flag; +} + int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, va_flags); @@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case AMDGPU_VA_OP_REPLACE: - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, va_flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 0b66d2e6b5d5..e0f025dd1b14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags); int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f9bef3154b99..e00b46180d2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" +#include "amdgpu_ras.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe, me; + int i, queue, me; for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) { queue = i % adev->gfx.me.num_queue_per_pipe; - pipe = (i / adev->gfx.me.num_queue_per_pipe) - % adev->gfx.me.num_pipe_per_me; me = (i / adev->gfx.me.num_queue_per_pipe) / adev->gfx.me.num_pipe_per_me; @@ -320,8 +319,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, return r; } -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq) +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); amdgpu_ring_fini(ring); @@ -456,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) } ring = &adev->gfx.kiq.ring; - if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) - kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, @@ -569,3 +565,102 @@ void 
amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) mutex_unlock(&adev->gfx.gfx_off_mutex); } + +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "gfx_err_count", + .debugfs_name = "gfx_err_inject", + }; + struct ras_ih_if ih_info = { + .cb = amdgpu_gfx_process_ras_data_cb, + }; + + if (!adev->gfx.ras_if) { + adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->gfx.ras_if) + return -ENOMEM; + adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; + adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gfx.ras_if->sub_block_index = 0; + strcpy(adev->gfx.ras_if->name, "gfx"); + } + fs_info.head = ih_info.head = *adev->gfx.ras_if; + + r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (r) + goto late_fini; + } else { + /* free gfx ras_if if ras is not supported */ + r = 0; + goto free; + } + + return 0; +late_fini: + amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info); +free: + kfree(adev->gfx.ras_if); + adev->gfx.ras_if = NULL; + return r; +} + +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && + adev->gfx.ras_if) { + struct ras_common_if *ras_if = adev->gfx.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_gfx_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + /* TODO ue will trigger an interrupt. + * + * When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->gfx.funcs->query_ras_error_count) + adev->gfx.funcs->query_ras_error_count(adev, err_data); + amdgpu_ras_reset_gpu(adev, 0); + } + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->gfx.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + DRM_ERROR("CP ECC ERROR IRQ\n"); + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 6ee4021910e2..0ae0a2715b0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs { int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); }; -struct amdgpu_ngg_buf { - struct amdgpu_bo *bo; - uint64_t gpu_addr; - uint32_t size; - uint32_t bo_size; -}; - -enum { - NGG_PRIM = 0, - NGG_POS, - NGG_CNTL, - NGG_PARAM, - NGG_BUF_MAX -}; - -struct amdgpu_ngg { - struct amdgpu_ngg_buf buf[NGG_BUF_MAX]; - uint32_t gds_reserve_addr; - uint32_t gds_reserve_size; - bool init; -}; - struct sq_work { struct work_struct work; unsigned ih_data; @@ -247,7 +225,7 @@ struct amdgpu_me { uint32_t num_me; uint32_t num_pipe_per_me; uint32_t num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1]; + void 
*mqd_backup[AMDGPU_MAX_GFX_RINGS]; /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES); @@ -289,6 +267,7 @@ struct amdgpu_gfx { uint32_t mec2_feature_version; bool mec_fw_write_wait; bool me_fw_write_wait; + bool cp_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; @@ -311,9 +290,6 @@ struct amdgpu_gfx { uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; - /* NGG */ - struct amdgpu_ngg ngg; - /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ struct mutex gfx_off_mutex; @@ -355,8 +331,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_irq_src *irq); -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq); +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, @@ -384,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); - +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); +int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 5790db61fa2c..a12f33c0f5df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -27,6 +27,8 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include "amdgpu.h" +#include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" /** * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO @@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, gmc->fault_hash[hash].idx = gmc->last_fault++; return false; } + +int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->umc.funcs && adev->umc.funcs->ras_late_init) { + r = adev->umc.funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) { + r = adev->mmhub.funcs->ras_late_init(adev); + if (r) + return r; + } + + return amdgpu_xgmi_ras_late_init(adev); +} + +void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) +{ + amdgpu_umc_ras_fini(adev); + amdgpu_mmhub_ras_fini(adev); + amdgpu_xgmi_ras_fini(adev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b6e1d98ef01e..b499a3de8bb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -77,6 +77,7 @@ struct amdgpu_gmc_fault { struct amdgpu_vmhub { uint32_t ctx0_ptb_addr_lo32; uint32_t ctx0_ptb_addr_hi32; + uint32_t vm_inv_eng0_sem; uint32_t vm_inv_eng0_req; uint32_t vm_inv_eng0_ack; uint32_t vm_context0_cntl; @@ -99,12 +100,15 @@ struct amdgpu_gmc_funcs { unsigned pasid); /* enable/disable PRT support */ void (*set_prt)(struct amdgpu_device *adev, bool enable); - /* set pte flags based per asic */ - uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev, - uint32_t flags); + /* map mtype to hardware flags */ + uint64_t 
(*map_mtype)(struct amdgpu_device *adev, uint32_t flags); /* get the pde for a given mc addr */ void (*get_vm_pde)(struct amdgpu_device *adev, int level, u64 *dst, u64 *flags); + /* get the pte flags to use for a BO VA mapping */ + void (*get_vm_pte)(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags); }; struct amdgpu_xgmi { @@ -120,21 +124,52 @@ struct amdgpu_xgmi { /* gpu list in the same hive */ struct list_head head; bool supported; + struct ras_common_if *ras_if; }; struct amdgpu_gmc { + /* FB's physical address in MMIO space (for CPU to + * map FB). This is different compared to the agp/ + * gart/vram_start/end field as the later is from + * GPU's view and aper_base is from CPU's view. + */ resource_size_t aper_size; resource_size_t aper_base; /* for some chips with <= 32MB we need to lie * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; + /* AGP aperture start and end in MC address space + * Driver find a hole in the MC address space + * to place AGP by setting MC_VM_AGP_BOT/TOP registers + * Under VMID0, logical address == MC address. AGP + * aperture maps to physical bus or IOVA addressed. + * AGP aperture is used to simulate FB in ZFB case. + * AGP aperture is also used for page table in system + * memory (mainly for APU). + * + */ u64 agp_size; u64 agp_start; u64 agp_end; + /* GART aperture start and end in MC address space + * Driver find a hole in the MC address space + * to place GART by setting VM_CONTEXT0_PAGE_TABLE_START/END_ADDR + * registers + * Under VMID0, logical address inside GART aperture will + * be translated through gpuvm gart page table to access + * paged system memory + */ u64 gart_size; u64 gart_start; u64 gart_end; + /* Frame buffer aperture of this GPU device. Different from + * fb_start (see below), this only covers the local GPU device. + * Driver get fb_start from MC_VM_FB_LOCATION_BASE (set by vbios) + * and calculate vram_start of this local device by adding an + * offset inside the XGMI hive. 
+ * Under VMID0, logical address == MC address + */ u64 vram_start; u64 vram_end; /* FB region , it's same as local vram region in single GPU, in XGMI @@ -153,6 +188,7 @@ struct amdgpu_gmc { uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; + uint8_t vram_vendor; uint32_t srbm_soft_reset; bool prt_warning; uint64_t stolen_size; @@ -177,15 +213,14 @@ struct amdgpu_gmc { struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; - struct ras_common_if *umc_ras_if; - struct ras_common_if *mmhub_ras_if; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) +#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) -#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) +#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) /** * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR @@ -230,5 +265,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp); +int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); +void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 53734da1c2df..3a67f6c046d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -206,7 +206,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, int r; if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait)) - return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false); + return amdgpu_sync_fence(sync, ring->vmid_wait, false); fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); if (!fences) @@ -241,7 +241,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, return -ENOMEM; } - r = amdgpu_sync_fence(adev, sync, &array->base, false); + r = amdgpu_sync_fence(sync, &array->base, false); dma_fence_put(ring->vmid_wait); ring->vmid_wait = &array->base; return r; @@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, !dma_fence_is_later(updates, (*id)->flushed_updates)) updates = NULL; - if ((*id)->owner != vm->entity.fence_context || + if ((*id)->owner != vm->direct.fence_context || job->vm_pd_addr != (*id)->pd_gpu_addr || updates || !(*id)->last_flush || ((*id)->last_flush->context != fence_context && @@ -294,7 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); if (tmp) { *id = NULL; - r = amdgpu_sync_fence(adev, sync, tmp, false); + r = amdgpu_sync_fence(sync, tmp, false); return r; } needs_flush = true; @@ -303,7 +303,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, /* Good we can use this VMID. Remember this submission as * user of the VMID. 
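Returning to the amdgpu_gmc.h rework above: the monolithic get_vm_pte_flags() callback is split into map_mtype(), which translates only the UAPI memory-type field, and get_vm_pte(), which computes per-mapping PTE bits. Combined with amdgpu_gem_va_map_flags() from the earlier amdgpu_gem.c hunk, a translation now expands as follows (illustrative flag combination):

```c
/* Generic bits are mapped in common code; only the MTYPE field is
 * delegated to the ASIC-specific callback. */
uint64_t pte = amdgpu_gem_va_map_flags(adev,
				       AMDGPU_VM_PAGE_READABLE |
				       AMDGPU_VM_PAGE_WRITEABLE |
				       AMDGPU_VM_MTYPE_UC);

/* ...which, per the hunk above, evaluates to:
 *   AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *   amdgpu_gmc_map_mtype(adev, AMDGPU_VM_MTYPE_UC)
 */
```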
*/ - r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); + r = amdgpu_sync_fence(&(*id)->active, fence, false); if (r) return r; @@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct dma_fence *flushed; /* Check all the prerequisites to using this VMID */ - if ((*id)->owner != vm->entity.fence_context) + if ((*id)->owner != vm->direct.fence_context) continue; if ((*id)->pd_gpu_addr != job->vm_pd_addr) @@ -375,7 +375,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, /* Good, we can use this VMID. Remember this submission as * user of the VMID. */ - r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false); + r = amdgpu_sync_fence(&(*id)->active, fence, false); if (r) return r; @@ -435,8 +435,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id = idle; /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(ring->adev, &id->active, - fence, false); + r = amdgpu_sync_fence(&id->active, fence, false); if (r) goto error; @@ -449,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } id->pd_gpu_addr = job->vm_pd_addr; - id->owner = vm->entity.fence_context; + id->owner = vm->direct.fence_context; if (job->vm_needs_flush) { dma_fence_put(id->last_flush); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 6d8f05511aba..111a301ce878 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -66,7 +66,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, if (ih->ring == NULL) return -ENOMEM; - memset((void *)ih->ring, 0, ih->ring_size + 8); ih->gpu_addr = dma_addr; ih->wptr_addr = dma_addr + ih->ring_size; ih->wptr_cpu = &ih->ring[ih->ring_size / 4]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2a3f5ec298db..5ed4227f304b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -55,6 +55,7 @@ #include "amdgpu_connectors.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_ras.h" #include <linux/pm_runtime.h> @@ -87,10 +88,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) struct drm_device *dev = adev->ddev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + struct drm_connector_list_iter iter; mutex_lock(&mode_config->mutex); - list_for_each_entry(connector, &mode_config->connector_list, head) + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) amdgpu_connector_hotplug(connector); + drm_connector_list_iter_end(&iter); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); @@ -153,6 +157,22 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg) ret = amdgpu_ih_process(adev, &adev->irq.ih); if (ret == IRQ_HANDLED) pm_runtime_mark_last_busy(dev->dev); + + /* For the hardware that cannot enable bif ring for both ras_controller_irq + * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status + * register to check whether the interrupt is triggered or not, and properly + * ack the interrupt if it is there + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) { + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_controller_intr_no_bifring) + adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); + + if (adev->nbio.funcs && + 
adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + } + return ret; } @@ -228,10 +248,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.msi_enabled = false; if (amdgpu_msi_ok(adev)) { - int ret = pci_enable_msi(adev->pdev); - if (!ret) { + int nvec = pci_msix_vec_count(adev->pdev); + unsigned int flags; + + if (nvec <= 0) { + flags = PCI_IRQ_MSI; + } else { + flags = PCI_IRQ_MSI | PCI_IRQ_MSIX; + } + /* we only need one vector */ + nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); + if (nvec > 0) { adev->irq.msi_enabled = true; - dev_dbg(adev->dev, "amdgpu: using MSI.\n"); + dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n"); } } @@ -254,7 +283,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev) INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); adev->irq.installed = true; - r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); + /* Use vector 0 for MSI-X */ + r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0)); if (r) { adev->irq.installed = false; if (!amdgpu_device_has_dc_support(adev)) @@ -284,7 +314,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) drm_irq_uninstall(adev->ddev); adev->irq.installed = false; if (adev->irq.msi_enabled) - pci_disable_msi(adev->pdev); + pci_free_irq_vectors(adev->pdev); if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); } @@ -369,7 +399,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, * amdgpu_irq_dispatch - dispatch IRQ to IP blocks * * @adev: amdgpu device pointer - * @entry: interrupt vector pointer + * @ih: interrupt ring instance * * Dispatches IRQ to IP blocks. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 9d76e0923a5a..73328d0c741d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -193,8 +193,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, fence = amdgpu_sync_get_fence(&job->sync, &explicit); if (fence && explicit) { if (drm_sched_dependency_optimized(fence, s_entity)) { - r = amdgpu_sync_fence(ring->adev, &job->sched_sync, - fence, false); + r = amdgpu_sync_fence(&job->sched_sync, fence, false); if (r) DRM_ERROR("Error adding fence (%d)\n", r); } @@ -218,7 +217,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); struct dma_fence *fence = NULL, *finished; struct amdgpu_job *job; - int r; + int r = 0; job = to_amdgpu_job(sched_job); finished = &job->base.s_fence->finished; @@ -243,9 +242,49 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) job->fence = dma_fence_get(fence); amdgpu_job_free_resources(job); + + fence = r ? 
ERR_PTR(r) : fence; return fence; } +#define to_drm_sched_job(sched_job) \ + container_of((sched_job), struct drm_sched_job, queue_node) + +void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_job *s_job; + struct drm_sched_entity *s_entity = NULL; + int i; + + /* Signal all jobs not yet scheduled */ + for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + struct drm_sched_rq *rq = &sched->sched_rq[i]; + + if (!rq) + continue; + + spin_lock(&rq->lock); + list_for_each_entry(s_entity, &rq->entities, list) { + while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) { + struct drm_sched_fence *s_fence = s_job->s_fence; + + dma_fence_signal(&s_fence->scheduled); + dma_fence_set_error(&s_fence->finished, -EHWPOISON); + dma_fence_signal(&s_fence->finished); + } + } + spin_unlock(&rq->lock); + } + + /* Signal all jobs already scheduled to HW */ + list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + struct drm_sched_fence *s_fence = s_job->s_fence; + + dma_fence_set_error(&s_fence->finished, -EHWPOISON); + dma_fence_signal(&s_fence->finished); + } +} + const struct drm_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 51e62504c279..dc7ee9358dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f); int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence); + +void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c new file mode 100644 index 000000000000..5727f00afc8e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -0,0 +1,211 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
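amdgpu_job_stop_all_jobs_on_sched(), added above, drains a scheduler after a fatal error: jobs still queued in the entities are popped and completed immediately, and every job's finished fence is tagged with -EHWPOISON before being signalled, so waiters can tell a poisoned completion from a genuine one. What a waiter observes, sketched:

```c
/* 'fence' is the finished fence of a submission on the stopped scheduler. */
long r = dma_fence_wait_timeout(fence, true, timeout);

if (r > 0 && dma_fence_get_status(fence) == -EHWPOISON) {
	/* The fence signalled, but only because the device was poisoned
	 * by a RAS event and the job was discarded, not executed. */
}
```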
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15d.h" +#include "soc15_common.h" + +#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000) + +static void amdgpu_jpeg_idle_work_handler(struct work_struct *work); + +int amdgpu_jpeg_sw_init(struct amdgpu_device *adev) +{ + INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler); + + return 0; +} + +int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev) +{ + int i; + + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec); + } + + return 0; +} + +int amdgpu_jpeg_suspend(struct amdgpu_device *adev) +{ + cancel_delayed_work_sync(&adev->jpeg.idle_work); + + return 0; +} + +int amdgpu_jpeg_resume(struct amdgpu_device *adev) +{ + return 0; +} + +static void amdgpu_jpeg_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, jpeg.idle_work.work); + unsigned int fences = 0; + unsigned int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec); + } + + if (fences == 0) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_GATE); + else + schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); +} + +void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work); + + if (set_clocks) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG, + AMD_PG_STATE_UNGATE); +} + +void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT); +} + +int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); + r = amdgpu_ring_alloc(ring, 3); + if (r) + return r; + + amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + return r; +} + +static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, + struct dma_fence **fence) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_job *job; + struct amdgpu_ib *ib; + struct dma_fence *f = NULL; + const unsigned ib_size_dw = 16; + int i, r; + + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); + if (r) + return r; + + ib = &job->ibs[0]; + + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); + ib->ptr[1] = 0xDEADBEEF; + for (i = 2; i < 16; i += 2) { + ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); + ib->ptr[i+1] = 0; + } + ib->length_dw = 16; + + r = amdgpu_job_submit_direct(job, ring, &f); + if (r) + goto err; + + if (fence) + *fence = dma_fence_get(f); + dma_fence_put(f); + + return 0; + +err: + amdgpu_job_free(job); + return r; +} + +int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + 
struct dma_fence *fence = NULL; + long r = 0; + + r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence); + if (r) + goto error; + + r = dma_fence_wait_timeout(fence, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto error; + } else if (r < 0) { + goto error; + } else { + r = 0; + } + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + dma_fence_put(fence); +error: + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h new file mode 100644 index 000000000000..5131a0a1bc8a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -0,0 +1,62 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
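amdgpu_jpeg_dec_ring_test_ring() above follows the driver's usual engine liveness probe: seed a scratch register with a poison value, submit a packet that makes the engine overwrite it, then poll for the new value. Reduced to its skeleton (ring-space allocation and error paths omitted; scratch_reg/scratch_off stand in for the per-instance jpeg_pitch register):

```c
WREG32(scratch_reg, 0xCAFEDEAD);		/* known-bad seed */

amdgpu_ring_write(ring, PACKET0(scratch_off, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);		/* value the engine must land */
amdgpu_ring_commit(ring);

for (i = 0; i < adev->usec_timeout; i++) {
	if (RREG32(scratch_reg) == 0xDEADBEEF)
		break;				/* engine fetched and executed */
	udelay(1);
}
r = (i < adev->usec_timeout) ? 0 : -ETIMEDOUT;
```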
+ * + */ + +#ifndef __AMDGPU_JPEG_H__ +#define __AMDGPU_JPEG_H__ + +#define AMDGPU_MAX_JPEG_INSTANCES 2 + +#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) +#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) + +struct amdgpu_jpeg_reg{ + unsigned jpeg_pitch; +}; + +struct amdgpu_jpeg_inst { + struct amdgpu_ring ring_dec; + struct amdgpu_irq_src irq; + struct amdgpu_jpeg_reg external; +}; + +struct amdgpu_jpeg { + uint8_t num_jpeg_inst; + struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; + struct amdgpu_jpeg_reg internal; + unsigned harvest_config; + struct delayed_work idle_work; + enum amd_powergating_state cur_state; +}; + +int amdgpu_jpeg_sw_init(struct amdgpu_device *adev); +int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev); +int amdgpu_jpeg_suspend(struct amdgpu_device *adev); +int amdgpu_jpeg_resume(struct amdgpu_device *adev); + +void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring); + +int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring); +int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); + +#endif /*__AMDGPU_JPEG_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d55f5baa83d3..60591dbc2097 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_request_full_gpu(adev, false); - if (amdgpu_device_is_px(dev)) { + if (adev->runpm) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); } @@ -150,8 +150,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } dev->dev_private = (void *)adev; - if ((amdgpu_runtime_pm != 0) && - amdgpu_has_atpx() && + if (amdgpu_has_atpx() && (amdgpu_is_atpx_hybrid() || amdgpu_has_atpx_dgpu_power_cntl()) && ((flags & AMD_IS_APU) == 0) && @@ -170,6 +169,13 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) goto out; } + if (amdgpu_device_supports_boco(dev) && + (amdgpu_runtime_pm != 0)) /* enable runpm by default */ + adev->runpm = true; + else if (amdgpu_device_supports_baco(dev) && + (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */ + adev->runpm = true; + /* Call ACPI methods: require modeset init * but failure is not fatal */ @@ -180,7 +186,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - if (amdgpu_device_is_px(dev)) { + if (adev->runpm) { dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); @@ -190,11 +196,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) pm_runtime_put_autosuspend(dev->dev); } - amdgpu_register_gpu_instance(adev); out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ - if (adev->rmmio && amdgpu_device_is_px(dev)) + if (adev->rmmio && adev->runpm) pm_runtime_put_noidle(dev->dev); amdgpu_driver_unload_kms(dev); } @@ -294,6 +299,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->ver = adev->dm.dmcu_fw_version; fw_info->feature = 0; break; + case AMDGPU_INFO_FW_DMCUB: + fw_info->ver = adev->dm.dmcub_fw_version; + fw_info->feature = 0; + break; default: return -EINVAL; } @@ -397,12 +406,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: - type = AMD_IP_BLOCK_TYPE_VCN; - for (i 
= 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->uvd.harvest_config & (1 << i)) + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? + AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + if (adev->jpeg.harvest_config & (1 << i)) continue; - if (adev->vcn.inst[i].ring_jpeg.sched.ready) + if (adev->jpeg.inst[i].ring_dec.sched.ready) ++num_rings; } ib_start_alignment = 16; @@ -518,9 +529,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_VCN_DEC: case AMDGPU_HW_IP_VCN_ENC: - case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; break; + case AMDGPU_HW_IP_VCN_JPEG: + type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? + AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; + break; default: return -EINVAL; } @@ -584,9 +598,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_vram_gtt vram_gtt; vram_gtt.vram_size = adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size); - vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size - - atomic64_read(&adev->visible_pin_size); + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; + vram_gtt.vram_cpu_accessible_size = + min(adev->gmc.visible_vram_size - + atomic64_read(&adev->visible_pin_size), + vram_gtt.vram_size); vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); @@ -599,15 +616,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; mem.vram.usable_heap_size = adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size); + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = adev->gmc.visible_vram_size; - mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size - - atomic64_read(&adev->visible_pin_size); + mem.cpu_accessible_vram.usable_heap_size = + min(adev->gmc.visible_vram_size - + atomic64_read(&adev->visible_pin_size), + mem.vram.usable_heap_size); mem.cpu_accessible_vram.heap_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.cpu_accessible_vram.max_allocation = @@ -650,15 +670,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return -ENOMEM; alloc_size = info->read_mmr_reg.count * sizeof(*regs); - for (i = 0; i < info->read_mmr_reg.count; i++) + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < info->read_mmr_reg.count; i++) { if (amdgpu_asic_read_register(adev, se_num, sh_num, info->read_mmr_reg.dword_offset + i, ®s[i])) { DRM_DEBUG_KMS("unallowed offset %#x\n", info->read_mmr_reg.dword_offset + i); kfree(regs); + amdgpu_gfx_off_ctrl(adev, true); return -EFAULT; } + } + amdgpu_gfx_off_ctrl(adev, true); n = copy_to_user(out, regs, min(size, alloc_size)); kfree(regs); return n ? 
-EFAULT : 0; @@ -679,10 +703,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (adev->pm.dpm_enabled) { dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; - } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) { - dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; - dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; } else { dev_info.max_engine_clock = adev->clock.default_sclk * 10; dev_info.max_memory_clock = adev->clock.default_mclk * 10; @@ -729,17 +749,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.vce_harvest_config = adev->vce.harvest_config; dev_info.gc_double_offchip_lds_buf = adev->gfx.config.double_offchip_lds_buf; - - if (amdgpu_ngg) { - dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr; - dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size; - dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr; - dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size; - dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr; - dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size; - dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr; - dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size; - } dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs; dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh; @@ -968,6 +977,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* Ensure IB tests are run on ring */ flush_delayed_work(&adev->delayed_init_work); + + if (amdgpu_ras_intr_triggered()) { + DRM_ERROR("RAS Intr triggered, device disabled!!"); + return -EHWPOISON; + } + file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); @@ -1390,6 +1405,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* DMCUB */ + query_fw.fw_type = AMDGPU_INFO_FW_DMCUB; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c new file mode 100644 index 000000000000..676c48c02d77 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -0,0 +1,70 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
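The amdgpu_info_ioctl() change above brackets the register-read loop with amdgpu_gfx_off_ctrl() calls: while GFXOFF is engaged, GFX-domain registers are inaccessible, so the read path must veto GFXOFF for its duration and re-allow it on every exit path. The pattern:

```c
amdgpu_gfx_off_ctrl(adev, false);		/* veto GFXOFF: keep GFX powered */

if (amdgpu_asic_read_register(adev, se_num, sh_num, offset, &val)) {
	amdgpu_gfx_off_ctrl(adev, true);	/* drop the veto on the error path too */
	return -EFAULT;
}

amdgpu_gfx_off_ctrl(adev, true);		/* re-allow GFXOFF */
```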
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "mmhub_err_count", + .debugfs_name = "mmhub_err_inject", + }; + + if (!adev->mmhub.ras_if) { + adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->mmhub.ras_if) + return -ENOMEM; + adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB; + adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mmhub.ras_if->sub_block_index = 0; + strcpy(adev->mmhub.ras_if->name, "mmhub"); + } + ih_info.head = fs_info.head = *adev->mmhub.ras_if; + r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) { + kfree(adev->mmhub.ras_if); + adev->mmhub.ras_if = NULL; + } + + return r; +} + +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && + adev->mmhub.ras_if) { + struct ras_common_if *ras_if = adev->mmhub.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 2d75ecfa199b..1cd78940cf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -23,9 +23,17 @@ struct amdgpu_mmhub_funcs { void (*ras_init)(struct amdgpu_device *adev); + int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); }; +struct amdgpu_mmhub { + struct ras_common_if *ras_if; + const struct amdgpu_mmhub_funcs *funcs; +}; + +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev); +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 31d4deb5d294..828b5167ff12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -51,438 +51,107 @@ #include "amdgpu_amdkfd.h" /** - * struct amdgpu_mn_node + * amdgpu_mn_invalidate_gfx - callback to notify about mm change * - * @it: interval node defining start-last of the affected address range - * @bos: list of all BOs in the affected address range - * - * Manages all BOs which are affected of a certain range of address space. 
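amdgpu_mmhub_ras_late_init() above has the same shape as the gfx variant earlier in this series, and together the hunks spell out the uncorrectable-error flow: the per-IP interrupt handler packages the IV entry into a ras_dispatch_if, the RAS core routes it to the callback registered at late-init time, and that callback queries the error count and kicks off recovery. Condensed from the gfx hunks:

```c
/* 1. IRQ context (amdgpu_gfx_cp_ecc_error_irq): hand off to the RAS core */
ih_data.head = *ras_if;
ih_data.entry = entry;
amdgpu_ras_interrupt_dispatch(adev, &ih_data);

/* 2. The RAS core invokes ih_info.cb, here
 *    amdgpu_gfx_process_ras_data_cb(), which... */
if (adev->gfx.funcs->query_ras_error_count)
	adev->gfx.funcs->query_ras_error_count(adev, err_data);

amdgpu_ras_reset_gpu(adev, 0);	/* 3. ...triggers GPU recovery */
```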
- */ -struct amdgpu_mn_node { - struct interval_tree_node it; - struct list_head bos; -}; - -/** - * amdgpu_mn_destroy - destroy the HMM mirror - * - * @work: previously sheduled work item - * - * Lazy destroys the notifier from a work item - */ -static void amdgpu_mn_destroy(struct work_struct *work) -{ - struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work); - struct amdgpu_device *adev = amn->adev; - struct amdgpu_mn_node *node, *next_node; - struct amdgpu_bo *bo, *next_bo; - - mutex_lock(&adev->mn_lock); - down_write(&amn->lock); - hash_del(&amn->node); - rbtree_postorder_for_each_entry_safe(node, next_node, - &amn->objects.rb_root, it.rb) { - list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { - bo->mn = NULL; - list_del_init(&bo->mn_list); - } - kfree(node); - } - up_write(&amn->lock); - mutex_unlock(&adev->mn_lock); - - hmm_mirror_unregister(&amn->mirror); - kfree(amn); -} - -/** - * amdgpu_hmm_mirror_release - callback to notify about mm destruction - * - * @mirror: the HMM mirror (mm) this callback is about - * - * Shedule a work item to lazy destroy HMM mirror. - */ -static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror) -{ - struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror); - - INIT_WORK(&amn->work, amdgpu_mn_destroy); - schedule_work(&amn->work); -} - -/** - * amdgpu_mn_lock - take the write side lock for this notifier - * - * @mn: our notifier - */ -void amdgpu_mn_lock(struct amdgpu_mn *mn) -{ - if (mn) - down_write(&mn->lock); -} - -/** - * amdgpu_mn_unlock - drop the write side lock for this notifier - * - * @mn: our notifier - */ -void amdgpu_mn_unlock(struct amdgpu_mn *mn) -{ - if (mn) - up_write(&mn->lock); -} - -/** - * amdgpu_mn_read_lock - take the read side lock for this notifier - * - * @amn: our notifier - */ -static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable) -{ - if (blockable) - down_read(&amn->lock); - else if (!down_read_trylock(&amn->lock)) - return -EAGAIN; - - return 0; -} - -/** - * amdgpu_mn_read_unlock - drop the read side lock for this notifier - * - * @amn: our notifier - */ -static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn) -{ - up_read(&amn->lock); -} - -/** - * amdgpu_mn_invalidate_node - unmap all BOs of a node - * - * @node: the node with the BOs to unmap - * @start: start of address range affected - * @end: end of address range affected + * @mni: the range (mm) is about to update + * @range: details on the invalidation + * @cur_seq: Value to pass to mmu_interval_set_seq() * * Block for operations on BOs to finish and mark pages as accessed and * potentially dirty. 
*/ -static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, - unsigned long start, - unsigned long end) +static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) { - struct amdgpu_bo *bo; + struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); long r; - list_for_each_entry(bo, &node->bos, mn_list) { - - if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) - continue; - - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, - true, false, MAX_SCHEDULE_TIMEOUT); - if (r <= 0) - DRM_ERROR("(%ld) failed to wait for user bo\n", r); - } -} - -/** - * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change - * - * @mirror: the hmm_mirror (mm) is about to update - * @update: the update start, end address - * - * Block for operations on BOs to finish and mark pages as accessed and - * potentially dirty. - */ -static int -amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror, - const struct mmu_notifier_range *update) -{ - struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror); - unsigned long start = update->start; - unsigned long end = update->end; - bool blockable = mmu_notifier_range_blockable(update); - struct interval_tree_node *it; + if (!mmu_notifier_range_blockable(range)) + return false; - /* notification is exclusive, but interval is inclusive */ - end -= 1; + mutex_lock(&adev->notifier_lock); - /* TODO we should be able to split locking for interval tree and - * amdgpu_mn_invalidate_node - */ - if (amdgpu_mn_read_lock(amn, blockable)) - return -EAGAIN; + mmu_interval_set_seq(mni, cur_seq); - it = interval_tree_iter_first(&amn->objects, start, end); - while (it) { - struct amdgpu_mn_node *node; - - if (!blockable) { - amdgpu_mn_read_unlock(amn); - return -EAGAIN; - } - - node = container_of(it, struct amdgpu_mn_node, it); - it = interval_tree_iter_next(it, start, end); - - amdgpu_mn_invalidate_node(node, start, end); - } - - amdgpu_mn_read_unlock(amn); - - return 0; -} - -/** - * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change - * - * @mirror: the hmm_mirror (mm) is about to update - * @update: the update start, end address - * - * We temporarily evict all BOs between start and end. This - * necessitates evicting all user-mode queues of the process. The BOs - * are restorted in amdgpu_mn_invalidate_range_end_hsa. 
- */ -static int -amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror, - const struct mmu_notifier_range *update) -{ - struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror); - unsigned long start = update->start; - unsigned long end = update->end; - bool blockable = mmu_notifier_range_blockable(update); - struct interval_tree_node *it; - - /* notification is exclusive, but interval is inclusive */ - end -= 1; - - if (amdgpu_mn_read_lock(amn, blockable)) - return -EAGAIN; - - it = interval_tree_iter_first(&amn->objects, start, end); - while (it) { - struct amdgpu_mn_node *node; - struct amdgpu_bo *bo; - - if (!blockable) { - amdgpu_mn_read_unlock(amn); - return -EAGAIN; - } - - node = container_of(it, struct amdgpu_mn_node, it); - it = interval_tree_iter_next(it, start, end); - - list_for_each_entry(bo, &node->bos, mn_list) { - struct kgd_mem *mem = bo->kfd_bo; - - if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, - start, end)) - amdgpu_amdkfd_evict_userptr(mem, amn->mm); - } - } - - amdgpu_mn_read_unlock(amn); - - return 0; + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + mutex_unlock(&adev->notifier_lock); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); + return true; } -/* Low bits of any reasonable mm pointer will be unused due to struct - * alignment. Use these bits to make a unique key from the mm pointer - * and notifier type. - */ -#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type)) - -static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = { - [AMDGPU_MN_TYPE_GFX] = { - .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx, - .release = amdgpu_hmm_mirror_release - }, - [AMDGPU_MN_TYPE_HSA] = { - .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa, - .release = amdgpu_hmm_mirror_release - }, +static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = { + .invalidate = amdgpu_mn_invalidate_gfx, }; /** - * amdgpu_mn_get - create HMM mirror context + * amdgpu_mn_invalidate_hsa - callback to notify about mm change * - * @adev: amdgpu device pointer - * @type: type of MMU notifier context + * @mni: the range (mm) is about to update + * @range: details on the invalidation + * @cur_seq: Value to pass to mmu_interval_set_seq() * - * Creates a HMM mirror context for current->mm. + * We temporarily evict the BO attached to this range. This necessitates + * evicting all user-mode queues of the process. 
*/ -struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev, - enum amdgpu_mn_type type) +static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) { - struct mm_struct *mm = current->mm; - struct amdgpu_mn *amn; - unsigned long key = AMDGPU_MN_KEY(mm, type); - int r; - - mutex_lock(&adev->mn_lock); - if (down_write_killable(&mm->mmap_sem)) { - mutex_unlock(&adev->mn_lock); - return ERR_PTR(-EINTR); - } - - hash_for_each_possible(adev->mn_hash, amn, node, key) - if (AMDGPU_MN_KEY(amn->mm, amn->type) == key) - goto release_locks; - - amn = kzalloc(sizeof(*amn), GFP_KERNEL); - if (!amn) { - amn = ERR_PTR(-ENOMEM); - goto release_locks; - } - - amn->adev = adev; - amn->mm = mm; - init_rwsem(&amn->lock); - amn->type = type; - amn->objects = RB_ROOT_CACHED; - - amn->mirror.ops = &amdgpu_hmm_mirror_ops[type]; - r = hmm_mirror_register(&amn->mirror, mm); - if (r) - goto free_amn; + struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type)); + if (!mmu_notifier_range_blockable(range)) + return false; -release_locks: - up_write(&mm->mmap_sem); - mutex_unlock(&adev->mn_lock); + mutex_lock(&adev->notifier_lock); - return amn; + mmu_interval_set_seq(mni, cur_seq); -free_amn: - up_write(&mm->mmap_sem); - mutex_unlock(&adev->mn_lock); - kfree(amn); + amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm); + mutex_unlock(&adev->notifier_lock); - return ERR_PTR(r); + return true; } +static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = { + .invalidate = amdgpu_mn_invalidate_hsa, +}; + /** * amdgpu_mn_register - register a BO for notifier updates * * @bo: amdgpu buffer object * @addr: userptr addr we should monitor * - * Registers an HMM mirror for the given BO at the specified address. + * Registers a mmu_notifier for the given BO at the specified address. * Returns 0 on success, -ERRNO if anything goes wrong. */ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) { - unsigned long end = addr + amdgpu_bo_size(bo) - 1; - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - enum amdgpu_mn_type type = - bo->kfd_bo ? 
AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX; - struct amdgpu_mn *amn; - struct amdgpu_mn_node *node = NULL, *new_node; - struct list_head bos; - struct interval_tree_node *it; - - amn = amdgpu_mn_get(adev, type); - if (IS_ERR(amn)) - return PTR_ERR(amn); - - new_node = kmalloc(sizeof(*new_node), GFP_KERNEL); - if (!new_node) - return -ENOMEM; - - INIT_LIST_HEAD(&bos); - - down_write(&amn->lock); - - while ((it = interval_tree_iter_first(&amn->objects, addr, end))) { - kfree(node); - node = container_of(it, struct amdgpu_mn_node, it); - interval_tree_remove(&node->it, &amn->objects); - addr = min(it->start, addr); - end = max(it->last, end); - list_splice(&node->bos, &bos); - } - - if (!node) - node = new_node; - else - kfree(new_node); - - bo->mn = amn; - - node->it.start = addr; - node->it.last = end; - INIT_LIST_HEAD(&node->bos); - list_splice(&bos, &node->bos); - list_add(&bo->mn_list, &node->bos); - - interval_tree_insert(&node->it, &amn->objects); - - up_write(&amn->lock); - - return 0; + if (bo->kfd_bo) + return mmu_interval_notifier_insert(&bo->notifier, current->mm, + addr, amdgpu_bo_size(bo), + &amdgpu_mn_hsa_ops); + return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, + amdgpu_bo_size(bo), + &amdgpu_mn_gfx_ops); } /** - * amdgpu_mn_unregister - unregister a BO for HMM mirror updates + * amdgpu_mn_unregister - unregister a BO for notifier updates * * @bo: amdgpu buffer object * - * Remove any registration of HMM mirror updates from the buffer object. + * Remove any registration of mmu notifier updates from the buffer object. */ void amdgpu_mn_unregister(struct amdgpu_bo *bo) { - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct amdgpu_mn *amn; - struct list_head *head; - - mutex_lock(&adev->mn_lock); - - amn = bo->mn; - if (amn == NULL) { - mutex_unlock(&adev->mn_lock); + if (!bo->notifier.mm) return; - } - - down_write(&amn->lock); - - /* save the next list entry for later */ - head = bo->mn_list.next; - - bo->mn = NULL; - list_del_init(&bo->mn_list); - - if (list_empty(head)) { - struct amdgpu_mn_node *node; - - node = container_of(head, struct amdgpu_mn_node, bos); - interval_tree_remove(&node->it, &amn->objects); - kfree(node); - } - - up_write(&amn->lock); - mutex_unlock(&adev->mn_lock); -} - -/* flags used by HMM internal, not related to CPU/GPU PTE flags */ -static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { - (1 << 0), /* HMM_PFN_VALID */ - (1 << 1), /* HMM_PFN_WRITE */ - 0 /* HMM_PFN_DEVICE_PRIVATE */ -}; - -static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { - 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */ - 0, /* HMM_PFN_NONE */ - 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */ -}; - -void amdgpu_hmm_init_range(struct hmm_range *range) -{ - if (range) { - range->flags = hmm_range_flags; - range->values = hmm_range_values; - range->pfn_shift = PAGE_SHIFT; - } + mmu_interval_notifier_remove(&bo->notifier); + bo->notifier.mm = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h index b8ed68943625..a292238f75eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h @@ -30,63 +30,10 @@ #include <linux/workqueue.h> #include <linux/interval_tree.h> -enum amdgpu_mn_type { - AMDGPU_MN_TYPE_GFX, - AMDGPU_MN_TYPE_HSA, -}; - -/** - * struct amdgpu_mn - * - * @adev: amdgpu device pointer - * @mm: process address space - * @type: type of MMU notifier - * @work: destruction work item - * @node: hash table node to find structure by adev and mn - * @lock: 
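For readers following the HMM-mirror-to-mmu_interval_notifier conversion above: the new invalidate callbacks bump the notifier sequence with mmu_interval_set_seq() under adev->notifier_lock, and the page-population side is expected to close the race with the usual begin/retry pattern. A minimal sketch of that consumer loop, with an illustrative notifier_lock parameter standing in for adev->notifier_lock (this helper is not part of the patch):

#include <linux/mmu_notifier.h>

/* Hypothetical illustration of the mmu_interval_read_begin()/
 * mmu_interval_read_retry() pattern that pairs with the invalidate
 * callbacks above. */
static int example_populate_userptr(struct mmu_interval_notifier *notifier,
				    struct mutex *notifier_lock)
{
	unsigned long seq;

	do {
		seq = mmu_interval_read_begin(notifier);

		/* ... fault in / collect the backing pages here ... */

		mutex_lock(notifier_lock);
		if (mmu_interval_read_retry(notifier, seq)) {
			/* An invalidation ran concurrently; start over. */
			mutex_unlock(notifier_lock);
			continue;
		}
		/* ... publish the pages while the lock is held ... */
		mutex_unlock(notifier_lock);
		return 0;
	} while (true);
}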
rw semaphore protecting the notifier nodes - * @objects: interval tree containing amdgpu_mn_nodes - * @mirror: HMM mirror function support - * - * Data for each amdgpu device and process address space. - */ -struct amdgpu_mn { - /* constant after initialisation */ - struct amdgpu_device *adev; - struct mm_struct *mm; - enum amdgpu_mn_type type; - - /* only used on destruction */ - struct work_struct work; - - /* protected by adev->mn_lock */ - struct hlist_node node; - - /* objects protected by lock */ - struct rw_semaphore lock; - struct rb_root_cached objects; - -#ifdef CONFIG_HMM_MIRROR - /* HMM mirror */ - struct hmm_mirror mirror; -#endif -}; - #if defined(CONFIG_HMM_MIRROR) -void amdgpu_mn_lock(struct amdgpu_mn *mn); -void amdgpu_mn_unlock(struct amdgpu_mn *mn); -struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev, - enum amdgpu_mn_type type); int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_mn_unregister(struct amdgpu_bo *bo); -void amdgpu_hmm_init_range(struct hmm_range *range); #else -static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {} -static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {} -static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev, - enum amdgpu_mn_type type) -{ - return NULL; -} static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) { DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, " diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c new file mode 100644 index 000000000000..7d5c3a9de9ea --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "pcie_bif_err_count", + .debugfs_name = "pcie_bif_err_inject", + }; + + if (!adev->nbio.ras_if) { + adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->nbio.ras_if) + return -ENOMEM; + adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF; + adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->nbio.ras_if->sub_block_index = 0; + strcpy(adev->nbio.ras_if->name, "pcie_bif"); + } + ih_info.head = fs_info.head = *adev->nbio.ras_if; + r = amdgpu_ras_late_init(adev, adev->nbio.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0); + if (r) + goto late_fini; + r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + return 0; +late_fini: + amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info); +free: + kfree(adev->nbio.ras_if); + adev->nbio.ras_if = NULL; + return r; +} + +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) && + adev->nbio.ras_if) { + struct ras_common_if *ras_if = adev->nbio.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h new file mode 100644 index 000000000000..919bd566ba3c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
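The two RAS helpers above are meant to be reached through the new funcs table rather than called unconditionally. A chip-specific NBIO implementation would typically wire them up along these lines (the surrounding names are an illustrative sketch, not taken from this patch):

/* Sketch: an ASIC-specific funcs table pointing at the common helper. */
static const struct amdgpu_nbio_funcs example_nbio_funcs = {
	/* ... register accessors, doorbell and clockgating hooks ... */
	.ras_late_init = amdgpu_nbio_ras_late_init,
};

/* Sketch: RAS bring-up calling through the table when the hook exists. */
static int example_nbio_late_init(struct amdgpu_device *adev)
{
	if (adev->nbio.funcs && adev->nbio.funcs->ras_late_init)
		return adev->nbio.funcs->ras_late_init(adev);
	return 0;
}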
+ * + */ +#ifndef __AMDGPU_NBIO_H__ +#define __AMDGPU_NBIO_H__ + +/* + * amdgpu nbio functions + */ +struct nbio_hdp_flush_reg { + u32 ref_and_mask_cp0; + u32 ref_and_mask_cp1; + u32 ref_and_mask_cp2; + u32 ref_and_mask_cp3; + u32 ref_and_mask_cp4; + u32 ref_and_mask_cp5; + u32 ref_and_mask_cp6; + u32 ref_and_mask_cp7; + u32 ref_and_mask_cp8; + u32 ref_and_mask_cp9; + u32 ref_and_mask_sdma0; + u32 ref_and_mask_sdma1; + u32 ref_and_mask_sdma2; + u32 ref_and_mask_sdma3; + u32 ref_and_mask_sdma4; + u32 ref_and_mask_sdma5; + u32 ref_and_mask_sdma6; + u32 ref_and_mask_sdma7; +}; + +struct amdgpu_nbio_funcs { + const struct nbio_hdp_flush_reg *hdp_flush_reg; + u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); + u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); + u32 (*get_rev_id)(struct amdgpu_device *adev); + void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); + void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + u32 (*get_memsize)(struct amdgpu_device *adev); + void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, int doorbell_size); + void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, + int doorbell_index, int instance); + void (*enable_doorbell_aperture)(struct amdgpu_device *adev, + bool enable); + void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, + bool enable); + void (*ih_doorbell_range)(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index); + void (*enable_doorbell_interrupt)(struct amdgpu_device *adev, + bool enable); + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, + bool enable); + void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, + bool enable); + void (*get_clockgating_state)(struct amdgpu_device *adev, + u32 *flags); + void (*ih_control)(struct amdgpu_device *adev); + void (*init_registers)(struct amdgpu_device *adev); + void (*detect_hw_virt)(struct amdgpu_device *adev); + void (*remap_hdp_registers)(struct amdgpu_device *adev); + void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); + void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); + int (*init_ras_controller_interrupt)(struct amdgpu_device *adev); + int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + int (*ras_late_init)(struct amdgpu_device *adev); +}; + +struct amdgpu_nbio { + const struct nbio_hdp_flush_reg *hdp_flush_reg; + struct amdgpu_irq_src ras_controller_irq; + struct amdgpu_irq_src ras_err_event_athub_irq; + struct ras_common_if *ras_if; + const struct amdgpu_nbio_funcs *funcs; +}; + +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev); +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6f0b789a0b49..e3f16b49e970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, } /** + * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location + * + * @adev: amdgpu device object + * @offset: offset of the BO + * @size: size of the BO + * @domain: where to place it + * 
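The point of the new struct amdgpu_nbio / amdgpu_nbio_funcs split is that IP-independent code drives NBIO through adev->nbio.funcs instead of hard-coding per-ASIC register offsets. A hedged example of what a call site looks like (the wrapper function itself is hypothetical):

/* Sketch: HDP flush and doorbell setup through the indirection layer. */
static void example_nbio_callsites(struct amdgpu_device *adev,
				   struct amdgpu_ring *ring)
{
	/* flush HDP for the given ring */
	adev->nbio.funcs->hdp_flush(adev, ring);

	/* enable the doorbell aperture for the device */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
}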
@bo_ptr: used to initialize BOs in structures + * @cpu_addr: optional CPU address mapping + * + * Creates a kernel BO at a specific offset in the address space of the domain. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + unsigned int i; + int r; + + offset &= PAGE_MASK; + size = ALIGN(size, PAGE_SIZE); + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr, + NULL, cpu_addr); + if (r) + return r; + + /* + * Remove the original mem node and create a new one at the request + * position. + */ + if (cpu_addr) + amdgpu_bo_kunmap(*bo_ptr); + + ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + + for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { + (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; + (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; + } + r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, + &(*bo_ptr)->tbo.mem, &ctx); + if (r) + goto error; + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) + goto error; + } + + amdgpu_bo_unreserve(*bo_ptr); + return 0; + +error: + amdgpu_bo_unreserve(*bo_ptr); + amdgpu_bo_unref(bo_ptr); + return r; +} + +/** * amdgpu_bo_free_kernel - free BO for kernel use * * @bo: amdgpu BO to free @@ -451,9 +515,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, { struct ttm_operation_ctx ctx = { .interruptible = (bp->type != ttm_bo_type_kernel), - .no_wait_gpu = false, + .no_wait_gpu = bp->no_wait_gpu, .resv = bp->resv, - .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT + .flags = bp->type != ttm_bo_type_kernel ? 
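A usage sketch for the amdgpu_bo_create_kernel_at() helper added above: carve out a kernel-owned BO over a fixed range of the domain, for example to keep a firmware-reserved region of VRAM out of the allocator. The offset and size below are made-up values:

static int example_reserve_fixed_vram(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo = NULL;
	void *cpu_ptr = NULL;
	int r;

	/* 64 KiB pinned at a 1 MiB offset in VRAM; illustrative only */
	r = amdgpu_bo_create_kernel_at(adev,
				       1ULL << 20,	/* offset */
				       64ULL << 10,	/* size */
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &bo, &cpu_ptr);
	if (r)
		return r;

	/* ... use cpu_ptr ... */

	amdgpu_bo_free_kernel(&bo, NULL, &cpu_ptr);
	return 0;
}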
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0 }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 658f4c9779b7..36dec51d1ef1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -30,6 +30,9 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" +#ifdef CONFIG_MMU_NOTIFIER +#include <linux/mmu_notifier.h> +#endif #define AMDGPU_BO_INVALID_OFFSET LONG_MAX #define AMDGPU_BO_MAX_PLACEMENTS 3 @@ -41,6 +44,7 @@ struct amdgpu_bo_param { u32 preferred_domain; u64 flags; enum ttm_bo_type type; + bool no_wait_gpu; struct dma_resv *resv; }; @@ -100,10 +104,12 @@ struct amdgpu_bo { struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; - union { - struct list_head mn_list; - struct list_head shadow_list; - }; + +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_interval_notifier notifier; +#endif + + struct list_head shadow_list; struct kgd_mem *kfd_bo; }; @@ -237,6 +243,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, u64 *gpu_addr, void **cpu_addr); +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr); void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03930313c263..b32adda70bbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -159,9 +159,12 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { if (adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); + pm = smu_get_current_power_state(&adev->smu); else pm = adev->pm.dpm.user_state; } else if (adev->powerplay.pp_funcs->get_current_power_state) { @@ -184,6 +187,9 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type state; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (strncmp("battery", buf, strlen("battery")) == 0) state = POWER_STATE_TYPE_BATTERY; else if (strncmp("balanced", buf, strlen("balanced")) == 0) @@ -283,7 +289,7 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_dpm_forced_level level = 0xff; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; if ((adev->flags & AMD_IS_PX) && @@ -320,6 +326,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, enum amd_dpm_forced_level current_level = 0xff; int ret = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + /* Can't force performance level when the card is off */ if ((adev->flags & AMD_IS_PX) && (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) @@ -348,19 +357,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, goto fail; } - /* handle sriov case here */ - if (amdgpu_sriov_vf(adev)) { - if (amdgim_is_hwperf(adev) && - adev->virt.ops->force_dpm_level) { - 
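The new amdgpu_bo_param.no_wait_gpu field feeds straight into ttm_operation_ctx.no_wait_gpu in amdgpu_bo_do_create(), so a caller can ask for an allocation that fails fast rather than blocking on GPU activity. A sketch of such a caller (field values are illustrative):

static int example_alloc_no_stall(struct amdgpu_device *adev,
				  unsigned long size,
				  struct amdgpu_bo **bo)
{
	struct amdgpu_bo_param bp;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = true;	/* fail fast instead of waiting on fences */
	bp.resv = NULL;

	return amdgpu_bo_create(adev, &bp, bo);
}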
mutex_lock(&adev->pm.mutex); - adev->virt.ops->force_dpm_level(adev, level); - mutex_unlock(&adev->pm.mutex); - return count; - } else { - return -EINVAL; - } - } - if (is_support_sw_smu(adev)) current_level = smu_get_performance_level(&adev->smu); else if (adev->powerplay.pp_funcs->get_performance_level) @@ -440,6 +436,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, enum amd_pm_state_type pm = 0; int i = 0, ret = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { pm = smu_get_current_power_state(smu); ret = smu_get_power_num_states(smu, &data); @@ -469,6 +468,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (adev->pp_force_state_enabled) return amdgpu_get_pp_cur_state(dev, attr, buf); else @@ -486,6 +488,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, unsigned long idx; int ret; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (strlen(buf) == 1) adev->pp_force_state_enabled = false; else if (is_support_sw_smu(adev)) @@ -535,6 +540,9 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, char *table = NULL; int size; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { size = smu_sys_get_pp_table(&adev->smu, (void **)&table); if (size < 0) @@ -562,6 +570,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (is_support_sw_smu(adev)) { ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); if (ret) @@ -654,6 +665,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, const char delimiter[3] = {' ', '\n', '\0'}; uint32_t type; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + if (count > 127) return -EINVAL; @@ -726,6 +740,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t size = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); @@ -770,6 +787,9 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev, uint64_t featuremask; int ret; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + ret = kstrtou64(buf, 0, &featuremask); if (ret) return -EINVAL; @@ -796,6 +816,9 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) { return smu_sys_get_pp_feature_mask(&adev->smu, buf); } else if (adev->powerplay.pp_funcs->get_ppfeature_status) @@ -805,8 +828,7 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, } /** - * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk - * pp_dpm_pcie + * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie * * The amdgpu driver provides a sysfs API for adjusting what power levels * are enabled for a given power state. 
The files pp_dpm_sclk, pp_dpm_mclk, @@ -822,9 +844,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, * * To manually adjust these states, first select manual using * power_dpm_force_performance_level. - * Secondly,Enter a new value for each level by inputing a string that + * Secondly, enter a new value for each level by inputting a string that * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" - * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. + * E.g., + * + * .. code-block:: bash + * + * echo "4 5 6" > pp_dpm_sclk + * + * will enable sclk levels 4, 5, and 6. * * NOTE: change to the dcefclk max dpm level is not supported now */ @@ -836,9 +864,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) - return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); @@ -894,15 +921,15 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, int ret; uint32_t mask = 0; - if (amdgpu_sriov_vf(adev)) - return 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); @@ -919,9 +946,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && - adev->virt.ops->get_pp_clk) - return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); @@ -941,15 +967,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, int ret; uint32_t mask = 0; - if (amdgpu_sriov_vf(adev)) - return 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); @@ -966,6 +992,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -984,12 +1013,15 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); + ret = smu_force_clk_levels(&adev->smu,
SMU_SOCCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); @@ -1006,6 +1038,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -1024,12 +1059,15 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); @@ -1046,6 +1084,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -1064,12 +1105,15 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); @@ -1086,6 +1130,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) @@ -1104,12 +1151,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, int ret; uint32_t mask = 0; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + ret = amdgpu_read_mask(buf, count, &mask); if (ret) return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); @@ -1127,6 +1177,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); else if (adev->powerplay.pp_funcs->get_sclk_od) @@ -1145,6 +1198,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, int ret; long int value; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + ret = kstrtol(buf, 0, &value); if (ret) { @@ -1178,6 +1234,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint32_t value = 0; + if (amdgpu_sriov_vf(adev)) + return 0; + if 
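For context on the level masks used by the set_pp_dpm_* handlers above: the user string from sysfs is converted to a bitmask before it reaches smu_force_clk_levels() or force_clock_level(), so "4 5 6" selects levels 4 through 6. A rough sketch of what the real amdgpu_read_mask() helper does (this is not its verbatim implementation):

static int example_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	char *copy, *cur, *tok;
	long level;
	int ret = 0;

	*mask = 0;
	copy = cur = kstrndup(buf, count, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	while ((tok = strsep(&cur, " \n")) != NULL) {
		if (!*tok)
			continue;
		ret = kstrtol(tok, 0, &level);
		if (ret)
			break;
		if (level < 0 || level > 31) {
			ret = -EINVAL;
			break;
		}
		*mask |= 1U << level;	/* "4" -> BIT(4), and so on */
	}

	kfree(copy);
	return ret;
}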
(is_support_sw_smu(adev)) value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); else if (adev->powerplay.pp_funcs->get_mclk_od) @@ -1196,6 +1255,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, int ret; long int value; + if (amdgpu_sriov_vf(adev)) + return 0; + ret = kstrtol(buf, 0, &value); if (ret) { @@ -1248,6 +1310,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (is_support_sw_smu(adev)) return smu_get_power_profile_mode(&adev->smu, buf); else if (adev->powerplay.pp_funcs->get_power_profile_mode) @@ -1280,6 +1345,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, if (ret) goto fail; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { if (count < 2 || count > 127) return -EINVAL; @@ -1301,7 +1369,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; if (is_support_sw_smu(adev)) - ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size); + ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); else if (adev->powerplay.pp_funcs->set_power_profile_mode) ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); if (!ret) @@ -1326,6 +1394,9 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int r, value, size = sizeof(value); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + /* read the IP busy sensor */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size); @@ -1352,6 +1423,9 @@ static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int r, value, size = sizeof(value); + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + /* read the IP busy sensor */ r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size); @@ -1382,6 +1456,9 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; uint64_t count0, count1; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + amdgpu_asic_get_pcie_usage(adev, &count0, &count1); return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", count0, count1, pcie_get_mps(adev->pdev)); @@ -1404,6 +1481,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + if (adev->unique_id) return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); @@ -1598,6 +1678,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); u32 pwm_mode = 0; + if (is_support_sw_smu(adev)) { pwm_mode = smu_get_fan_control_mode(&adev->smu); } else { @@ -2010,7 +2091,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, uint32_t limit = 0; if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, true); + smu_get_power_limit(&adev->smu, &limit, true, true); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 
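All of the pm sysfs hunks above repeat one policy: under SR-IOV, the files are only live when the hypervisor grants the single VF full powerplay control ("pp one VF" mode); otherwise reads report nothing and writes are rejected. The recurring gate, distilled into a sketch (this wrapper does not exist in the patch):

/* Sketch of the gate used by the show/store pairs above. */
static bool example_pp_sysfs_live(struct amdgpu_device *adev)
{
	return !amdgpu_sriov_vf(adev) || amdgpu_sriov_is_pp_one_vf(adev);
}

/* show handlers: if (!example_pp_sysfs_live(adev)) return 0;
 * store handlers: if (!example_pp_sysfs_live(adev)) return -EINVAL; */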
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); @@ -2028,7 +2109,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, uint32_t limit = 0; if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, false); + smu_get_power_limit(&adev->smu, &limit, false, true); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); @@ -2048,6 +2129,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; + if (amdgpu_sriov_vf(adev)) + return -EINVAL; + err = kstrtou32(buf, 10, &value); if (err) return err; @@ -2196,9 +2280,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * - fan1_input: fan speed in RPM * - * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM) + * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) * - * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable + * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable * * hwmon interfaces for GPU clocks: * @@ -2294,6 +2378,23 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; + /* under multi-vf mode, the hwmon attributes are all not supported */ + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return 0; + + /* there is no fan under pp one vf mode */ + if (amdgpu_sriov_is_pp_one_vf(adev) && + (attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_input.dev_attr.attr || + attr == &sensor_dev_attr_fan1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_max.dev_attr.attr || + attr == &sensor_dev_attr_fan1_target.dev_attr.attr || + attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) + return 0; + /* Skip fan attributes if fan is not present */ if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || @@ -2713,42 +2814,16 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev) } -int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) +void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) { int ret = 0; - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) - return ret; - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - - ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) { - DRM_ERROR("failed to create device file for dpm state\n"); - return ret; + if (is_support_sw_smu(adev)) { + ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG, enable); + if (ret) + DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n", + enable ? 
"true" : "false", ret); } - - return ret; -} - -void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev) -{ - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) - return; - - device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); } int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) @@ -2825,6 +2900,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_dpm_sclk\n"); return ret; } + + /* Arcturus does not support standalone mclk/socclk/fclk level setting */ + if (adev->asic_type == CHIP_ARCTURUS) { + dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_mclk.store = NULL; + + dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_socclk.store = NULL; + + dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_fclk.store = NULL; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); if (ret) { DRM_ERROR("failed to create device file pp_dpm_mclk\n"); @@ -3008,7 +3096,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; smu_handle_task(&adev->smu, smu_dpm->dpm_level, - AMD_PP_TASK_DISPLAY_CONFIG_CHANGE); + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, + true); } else { if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index ef31448ee8d8..3da1da277805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -41,5 +41,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_thermal_work_handler(struct work_struct *work); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 4d71537a960d..c14f2ccd0677 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -34,6 +34,8 @@ #include "psp_v11_0.h" #include "psp_v12_0.h" +#include "amdgpu_ras.h" + static void psp_set_funcs(struct amdgpu_device *adev); static int psp_early_init(void *handle) @@ -88,6 +90,17 @@ static int psp_sw_init(void *handle) return ret; } + ret = psp_mem_training_init(psp); + if (ret) { + DRM_ERROR("Failed to initialize memory training!\n"); + return ret; + } + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + return 0; } @@ -95,6 +108,7 @@ static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + psp_mem_training_fini(&adev->psp); release_firmware(adev->psp.sos_fw); adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); @@ -144,17 +158,26 @@ psp_cmd_submit_buf(struct psp_context *psp, memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); index = atomic_inc_return(&psp->fence_value); - ret = psp_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); + ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); if (ret) { atomic_dec(&psp->fence_value); mutex_unlock(&psp->mutex); return ret; } + amdgpu_asic_invalidate_hdp(psp->adev, NULL); while (*((unsigned int 
*)psp->fence_buf) != index) { if (--timeout == 0) break; + /* + * Shouldn't wait for timeout when err_event_athub occurs, + * because gpu reset thread triggered and lock resource should + * be released for psp resume sequence. + */ + if (amdgpu_ras_intr_triggered()) + break; msleep(1); + amdgpu_asic_invalidate_hdp(psp->adev, NULL); } /* In some cases, psp response status is not 0 even there is no @@ -168,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp, if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); - DRM_WARN("psp command failed and response status is (0x%X)\n", - psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); + DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n", + psp->cmd_buf_mem->cmd_id, + psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); if (!timeout) { mutex_unlock(&psp->mutex); return -EINVAL; @@ -253,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp) /* For ASICs support RLC autoload, psp will parse the toc * and calculate the total size of TMR needed */ - if (psp->toc_start_addr && + if (!amdgpu_sriov_vf(psp->adev) && + psp->toc_start_addr && psp->toc_bin_size && psp->fw_pri_buf) { ret = psp_load_toc(psp, &tmr_size); @@ -287,47 +312,23 @@ static int psp_tmr_load(struct psp_context *psp) ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - if (ret) - goto failed; kfree(cmd); - return 0; - -failed: - kfree(cmd); return ret; } -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t asd_mc, uint64_t asd_mc_shared, - uint32_t size, uint32_t shared_size) +static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t asd_mc, uint32_t size) { cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); cmd->cmd.cmd_load_ta.app_len = size; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared); - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; -} - -static int psp_asd_init(struct psp_context *psp) -{ - int ret; - - /* - * Allocate 16k memory aligned to 4k from Frame Buffer (local - * physical) for shared ASD <-> Driver - */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &psp->asd_shared_bo, - &psp->asd_shared_mc_addr, - &psp->asd_shared_buf); - - return ret; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; + cmd->cmd.cmd_load_ta.cmd_buf_len = 0; } static int psp_asd_load(struct psp_context *psp) @@ -349,11 +350,49 @@ static int psp_asd_load(struct psp_context *psp) memset(psp->fw_pri_buf, 0, PSP_1_MEG); memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr, - psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE); + psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->asd_ucode_size); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + if (!ret) { + psp->asd_context.asd_initialized = true; + psp->asd_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} + +static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t asd_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = asd_session_id; +} + +static int psp_asd_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp 
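The psp_mem_training_init()/psp_mem_training() pair added to psp_sw_init() above runs DRAM training once at cold boot and keeps the result so that psp_resume() can replay it with PSP_MEM_TRAIN_RESUME (see the resume hunk later in this file). In sketch form, the intended call pattern is (error handling condensed, the wrapper is illustrative):

/* Sketch of the memory-training stages wired into init and resume. */
static int example_mem_training_flow(struct psp_context *psp)
{
	int ret;

	ret = psp_mem_training_init(psp);	/* once, at sw_init */
	if (ret)
		return ret;

	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
	if (ret)
		return ret;

	/* ... later, on S3 exit ... */
	return psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
}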
*cmd; + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->asd_context.asd_initialized) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_asd_unload_cmd_buf(cmd, psp->asd_context.session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (!ret) + psp->asd_context.asd_initialized = false; kfree(cmd); @@ -548,7 +587,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) struct ta_xgmi_shared_memory *xgmi_cmd; int ret; - if (!psp->adev->psp.ta_fw) + if (!psp->adev->psp.ta_fw || + !psp->adev->psp.ta_xgmi_ucode_size || + !psp->adev->psp.ta_xgmi_start_addr) return -ENOENT; if (!psp->xgmi_context.initialized) { @@ -737,6 +778,12 @@ static int psp_ras_terminate(struct psp_context *psp) { int ret; + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->ras.ras_initialized) return 0; @@ -758,6 +805,18 @@ static int psp_ras_initialize(struct psp_context *psp) { int ret; + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->adev->psp.ta_ras_ucode_size || + !psp->adev->psp.ta_ras_start_addr) { + dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n"); + return 0; + } + if (!psp->ras.ras_initialized) { ret = psp_ras_init_shared_buf(psp); if (ret) @@ -772,6 +831,360 @@ static int psp_ras_initialize(struct psp_context *psp) } // ras end +// HDCP start +static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t hdcp_ta_mc, + uint64_t hdcp_mc_shared, + uint32_t hdcp_ta_size, + uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc); + cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = + lower_32_bits(hdcp_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = + upper_32_bits(hdcp_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_hdcp_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for hdcp ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->hdcp_context.hdcp_shared_bo, + &psp->hdcp_context.hdcp_shared_mc_addr, + &psp->hdcp_context.hdcp_shared_buf); + + return ret; +} + +static int psp_hdcp_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, + psp->ta_hdcp_ucode_size); + + psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->hdcp_context.hdcp_shared_mc_addr, + psp->ta_hdcp_ucode_size, + PSP_HDCP_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->hdcp_context.hdcp_initialized = 1; + psp->hdcp_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} +static int psp_hdcp_initialize(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if 
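The ASD changes above, together with the HDCP and DTM trusted-application blocks this file adds around them, converge on one session model: load stores cmd->resp.session_id in the per-feature context, invoke passes it back, and unload sends GFX_CMD_ID_UNLOAD_TA with it. A condensed sketch of the lifecycle from a caller's point of view (error handling elided; the ta_cmd_id value is a placeholder):

/* Sketch: HDCP TA lifecycle as orchestrated through the psp helpers. */
static void example_hdcp_session(struct psp_context *psp)
{
	/* allocates the shared buffer (first time) and loads the TA;
	 * on success hdcp_context.session_id is valid */
	psp_hdcp_initialize(psp);

	/* issue a TA command against the stored session */
	psp_hdcp_invoke(psp, 0 /* placeholder ta_cmd_id */);

	/* unloads the TA via GFX_CMD_ID_UNLOAD_TA and frees the
	 * shared memory */
	psp_hdcp_terminate(psp);
}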
(!psp->adev->psp.ta_hdcp_ucode_size || + !psp->adev->psp.ta_hdcp_start_addr) { + dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n"); + return 0; + } + + if (!psp->hdcp_context.hdcp_initialized) { + ret = psp_hdcp_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_hdcp_load(psp); + if (ret) + return ret; + + return 0; +} +static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t hdcp_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id; +} + +static int psp_hdcp_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the unloading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t hdcp_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->hdcp_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_hdcp_terminate(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->hdcp_context.hdcp_initialized) + return 0; + + ret = psp_hdcp_unload(psp); + if (ret) + return ret; + + psp->hdcp_context.hdcp_initialized = 0; + + /* free hdcp shared memory */ + amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, + &psp->hdcp_context.hdcp_shared_mc_addr, + &psp->hdcp_context.hdcp_shared_buf); + + return 0; +} +// HDCP end + +// DTM start +static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t dtm_ta_mc, + uint64_t dtm_mc_shared, + uint32_t dtm_ta_size, + uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc); + cmd->cmd.cmd_load_ta.app_len = dtm_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_dtm_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for dtm ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->dtm_context.dtm_shared_bo, + &psp->dtm_context.dtm_shared_mc_addr, + &psp->dtm_context.dtm_shared_buf); + + return ret; +} + +static int psp_dtm_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: 
bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); + + psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->dtm_context.dtm_shared_mc_addr, + psp->ta_dtm_ucode_size, + PSP_DTM_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->dtm_context.dtm_initialized = 1; + psp->dtm_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} + +static int psp_dtm_initialize(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->adev->psp.ta_dtm_ucode_size || + !psp->adev->psp.ta_dtm_start_addr) { + dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n"); + return 0; + } + + if (!psp->dtm_context.dtm_initialized) { + ret = psp_dtm_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_dtm_load(psp); + if (ret) + return ret; + + return 0; +} + +static void psp_prep_dtm_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t dtm_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = dtm_session_id; +} + +static int psp_dtm_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the unloading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_dtm_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t dtm_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->dtm_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_dtm_terminate(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->dtm_context.dtm_initialized) + return 0; + + ret = psp_dtm_unload(psp); + if (ret) + return ret; + + psp->dtm_context.dtm_initialized = 0; + + /* free dtm shared memory */ + amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, + &psp->dtm_context.dtm_shared_mc_addr, + &psp->dtm_context.dtm_shared_buf); + + return 0; +} +// DTM end + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -818,35 +1231,6 @@ static int psp_hw_start(struct psp_context *psp) return ret; } - ret = psp_asd_init(psp); - if (ret) { - DRM_ERROR("PSP asd init failed!\n"); - return ret; - } - - ret = psp_asd_load(psp); - if (ret) { - DRM_ERROR("PSP load asd failed!\n"); - return ret; - } - - if (adev->gmc.xgmi.num_physical_nodes > 1) { - ret = psp_xgmi_initialize(psp); - /* Warning the XGMI seesion initialize failure - * Instead of stop driver initialization - */ - if (ret) - dev_err(psp->adev->dev, - "XGMI: Failed to initialize XGMI session\n"); - } - - if (psp->adev->psp.ta_fw) { - ret = psp_ras_initialize(psp); - if (ret) - dev_err(psp->adev->dev, - "RAS: Failed to initialize RAS\n"); - } - return 0; } @@ -938,6 +1322,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_VCN1_RAM: *type = GFX_FW_TYPE_VCN1_RAM; break; + case AMDGPU_UCODE_ID_DMCUB: + *type =
GFX_FW_TYPE_DMUB; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; @@ -950,21 +1337,7 @@ static void psp_print_fw_hdr(struct psp_context *psp, struct amdgpu_firmware_info *ucode) { struct amdgpu_device *adev = psp->adev; - const struct sdma_firmware_header_v1_0 *sdma_hdr = - (const struct sdma_firmware_header_v1_0 *) - adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; - const struct gfx_firmware_header_v1_0 *ce_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; - const struct gfx_firmware_header_v1_0 *pfp_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; - const struct gfx_firmware_header_v1_0 *me_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; - const struct gfx_firmware_header_v1_0 *mec_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - const struct rlc_firmware_header_v2_0 *rlc_hdr = - (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; - const struct smc_firmware_header_v1_0 *smc_hdr = - (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + struct common_firmware_header *hdr; switch (ucode->ucode_id) { case AMDGPU_UCODE_ID_SDMA0: @@ -975,25 +1348,33 @@ static void psp_print_fw_hdr(struct psp_context *psp, case AMDGPU_UCODE_ID_SDMA5: case AMDGPU_UCODE_ID_SDMA6: case AMDGPU_UCODE_ID_SDMA7: - amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header); + hdr = (struct common_firmware_header *) + adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; + amdgpu_ucode_print_sdma_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_CE: - amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_PFP: - amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_ME: - amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_MEC1: - amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_RLC_G: - amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; + amdgpu_ucode_print_rlc_hdr(hdr); break; case AMDGPU_UCODE_ID_SMC: - amdgpu_ucode_print_smc_hdr(&smc_hdr->header); + hdr = (struct common_firmware_header *)adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(hdr); break; default: break; @@ -1070,7 +1451,10 @@ out: || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) /*skip ucode loading in SRIOV VF */ continue; @@ -1079,10 +1463,6 @@ out: ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) /* skip mec JT when autoload is enabled */ continue; - /* Renoir only needs to load mec jump table one time */ - if (adev->asic_type == CHIP_RENOIR && - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) - continue; psp_print_fw_hdr(psp, ucode); @@ -1091,7 +1471,8 @@ out: return ret; /* Start rlc autoload after 
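The psp_print_fw_hdr() rewrite above relies on the layout rule that every ucode image begins with struct common_firmware_header, which is what makes a single hdr pointer work for SDMA, CP, RLC and SMC alike. A minimal sketch of the same idea (the helper itself is illustrative; field names come from the common header):

static void example_print_common_hdr(const struct firmware *fw)
{
	/* every amdgpu ucode image starts with the common header */
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)fw->data;

	DRM_DEBUG("ucode version 0x%08x, %u bytes\n",
		  le32_to_cpu(hdr->ucode_version),
		  le32_to_cpu(hdr->ucode_size_bytes));
}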
psp received all the gfx firmware */ - if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? + AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); @@ -1165,6 +1546,39 @@ skip_memalloc: if (ret) goto failed; + ret = psp_asd_load(psp); + if (ret) { + DRM_ERROR("PSP load asd failed!\n"); + goto failed; + } + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp); + /* Warning the XGMI session initialize failure + * Instead of stop driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } + + if (psp->adev->psp.ta_fw) { + ret = psp_ras_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "HDCP: Failed to initialize HDCP\n"); + + ret = psp_dtm_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "DTM: Failed to initialize DTM\n"); + } + return 0; failed: @@ -1216,8 +1630,13 @@ static int psp_hw_fini(void *handle) psp->xgmi_context.initialized == 1) psp_xgmi_terminate(psp); - if (psp->adev->psp.ta_fw) + if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); + psp_dtm_terminate(psp); + psp_hdcp_terminate(psp); + } + + psp_asd_unload(psp); psp_ring_destroy(psp, PSP_RING_TYPE__KM); @@ -1227,8 +1646,6 @@ static int psp_hw_fini(void *handle) &psp->fw_pri_mc_addr, &psp->fw_pri_buf); amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); - amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr, - &psp->asd_shared_buf); amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, (void **)&psp->cmd_buf_mem); @@ -1259,6 +1676,16 @@ static int psp_suspend(void *handle) DRM_ERROR("Failed to terminate ras ta\n"); return ret; } + ret = psp_hdcp_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate hdcp ta\n"); + return ret; + } + ret = psp_dtm_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate dtm ta\n"); + return ret; + } } ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); @@ -1278,6 +1705,12 @@ static int psp_resume(void *handle) DRM_INFO("PSP is resuming...\n"); + ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + mutex_lock(&adev->firmware.mutex); ret = psp_hw_start(psp); @@ -1288,6 +1721,39 @@ if (ret) goto failed; + ret = psp_asd_load(psp); + if (ret) { + DRM_ERROR("PSP load asd failed!\n"); + goto failed; + } + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp); + /* Warning the XGMI session initialize failure + * Instead of stop driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } + + if (psp->adev->psp.ta_fw) { + ret = psp_ras_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "HDCP: Failed to initialize HDCP\n"); + + ret = psp_dtm_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "DTM: Failed to initialize DTM\n"); + } + mutex_unlock(&adev->firmware.mutex); return 0; @@ -1317,9 +1783,6 @@ int psp_rlc_autoload_start(struct psp_context *psp) int ret; struct psp_gfx_cmd_resp *cmd; - if (amdgpu_sriov_vf(psp->adev)) - return 0; - cmd 
= kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1345,6 +1808,56 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, return psp_execute_np_fw_load(&adev->psp, &ucode); } +int psp_ring_cmd_submit(struct psp_context *psp, + uint64_t cmd_buf_mc_addr, + uint64_t fence_mc_addr, + int index) +{ + unsigned int psp_write_ptr_reg = 0; + struct psp_gfx_rb_frame *write_frame; + struct psp_ring *ring = &psp->km_ring; + struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; + struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + + ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; + struct amdgpu_device *adev = psp->adev; + uint32_t ring_size_dw = ring->ring_size / 4; + uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; + + /* KM (GPCOM) prepare write pointer */ + psp_write_ptr_reg = psp_ring_get_wptr(psp); + + /* Update KM RB frame pointer to new frame */ + /* write_frame ptr increments by size of rb_frame in bytes */ + /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ + if ((psp_write_ptr_reg % ring_size_dw) == 0) + write_frame = ring_buffer_start; + else + write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); + /* Check invalid write_frame ptr address */ + if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { + DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", + ring_buffer_start, ring_buffer_end, write_frame); + DRM_ERROR("write_frame is pointing to address out of bounds\n"); + return -EINVAL; + } + + /* Initialize KM RB frame */ + memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); + + /* Update KM RB frame */ + write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); + write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); + write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); + write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); + write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); + + /* Update the write Pointer in DWORDs */ + psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; + psp_ring_set_wptr(psp, psp_write_ptr_reg); + return 0; +} + static bool psp_check_fw_loading_status(struct amdgpu_device *adev, enum AMDGPU_UCODE_ID ucode_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index bc0947f6bc8a..5f8fd3e3535b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -32,11 +32,13 @@ #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 -#define PSP_ASD_SHARED_MEM_SIZE 0x4000 #define PSP_XGMI_SHARED_MEM_SIZE 0x4000 #define PSP_RAS_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 #define PSP_TMR_SIZE 0x400000 +#define PSP_HDCP_SHARED_MEM_SIZE 0x4000 +#define PSP_DTM_SHARED_MEM_SIZE 0x4000 +#define PSP_SHARED_MEM_SIZE 0x4000 struct psp_context; struct psp_xgmi_node_info; @@ -46,6 +48,8 @@ enum psp_bootloader_cmd { PSP_BL__LOAD_SYSDRV = 0x10000, PSP_BL__LOAD_SOSDRV = 0x20000, PSP_BL__LOAD_KEY_DATABASE = 0x80000, + PSP_BL__DRAM_LONG_TRAIN = 0x100000, + PSP_BL__DRAM_SHORT_TRAIN = 0x200000, }; enum psp_ring_type @@ -89,9 +93,6 @@ struct psp_funcs enum psp_ring_type ring_type); int (*ring_destroy)(struct psp_context *psp, enum psp_ring_type ring_type); - int (*cmd_submit)(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index); bool (*compare_sram_data)(struct psp_context *psp, struct amdgpu_firmware_info 
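The ring math in psp_ring_cmd_submit() above works in two units: the write pointer is tracked in DWORDs while frames are addressed as whole struct psp_gfx_rb_frame entries. A sketch of the wrap arithmetic (next_frame_wptr is illustrative); with a 4 KiB ring (ring_size_dw = 1024) and 64-byte frames (rb_frame_size_dw = 16), a wptr of 1008 selects the last frame (1008 / 16 = 63) and the next submission wraps back to frame 0:

static uint32_t next_frame_wptr(uint32_t wptr, uint32_t ring_size_dw,
				uint32_t rb_frame_size_dw)
{
	/* advance by one frame, wrapping at the end of the ring:
	 * (1008 + 16) % 1024 == 0 in the example above
	 */
	return (wptr + rb_frame_size_dw) % ring_size_dw;
}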
*ucode, enum AMDGPU_UCODE_ID ucode_type); @@ -108,6 +109,11 @@ struct psp_funcs struct ta_ras_trigger_error_input *info); int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr); int (*rlc_autoload_start)(struct psp_context *psp); + int (*mem_training_init)(struct psp_context *psp); + void (*mem_training_fini)(struct psp_context *psp); + int (*mem_training)(struct psp_context *psp, uint32_t ops); + uint32_t (*ring_get_wptr)(struct psp_context *psp); + void (*ring_set_wptr)(struct psp_context *psp, uint32_t value); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -123,6 +129,11 @@ struct psp_xgmi_topology_info { struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; }; +struct psp_asd_context { + bool asd_initialized; + uint32_t session_id; +}; + struct psp_xgmi_context { uint8_t initialized; uint32_t session_id; @@ -142,6 +153,65 @@ struct psp_ras_context { struct amdgpu_ras *ras; }; +struct psp_hdcp_context { + bool hdcp_initialized; + uint32_t session_id; + struct amdgpu_bo *hdcp_shared_bo; + uint64_t hdcp_shared_mc_addr; + void *hdcp_shared_buf; +}; + +struct psp_dtm_context { + bool dtm_initialized; + uint32_t session_id; + struct amdgpu_bo *dtm_shared_bo; + uint64_t dtm_shared_mc_addr; + void *dtm_shared_buf; +}; + +#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 +#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 +#define GDDR6_MEM_TRAINING_OFFSET 0x8000 + +enum psp_memory_training_init_flag { + PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, + PSP_MEM_TRAIN_SUPPORT = 0x1, + PSP_MEM_TRAIN_INIT_FAILED = 0x2, + PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4, + PSP_MEM_TRAIN_INIT_SUCCESS = 0x8, +}; + +enum psp_memory_training_ops { + PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1, + PSP_MEM_TRAIN_SAVE = 0x2, + PSP_MEM_TRAIN_RESTORE = 0x4, + PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8, + PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG, + PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG, +}; + +struct psp_memory_training_context { + /*training data size*/ + u64 train_data_size; + /* + * sys_cache + * cpu virtual address + * system memory buffer that used to store the training data. 
+ */ + void *sys_cache; + + /*vram offset of the p2c training data*/ + u64 p2c_train_data_offset; + struct amdgpu_bo *p2c_bo; + + /*vram offset of the c2p training data*/ + u64 c2p_train_data_offset; + struct amdgpu_bo *c2p_bo; + + enum psp_memory_training_init_flag init; + u32 training_cnt; +}; + struct psp_context { struct amdgpu_device *adev; @@ -172,15 +242,12 @@ struct psp_context struct amdgpu_bo *tmr_bo; uint64_t tmr_mc_addr; - /* asd firmware and buffer */ + /* asd firmware */ const struct firmware *asd_fw; uint32_t asd_fw_version; uint32_t asd_feature_version; uint32_t asd_ucode_size; uint8_t *asd_start_addr; - struct amdgpu_bo *asd_shared_bo; - uint64_t asd_shared_mc_addr; - void *asd_shared_buf; /* fence buffer */ struct amdgpu_bo *fence_buf_bo; @@ -206,9 +273,22 @@ struct psp_context uint32_t ta_ras_ucode_version; uint32_t ta_ras_ucode_size; uint8_t *ta_ras_start_addr; + + uint32_t ta_hdcp_ucode_version; + uint32_t ta_hdcp_ucode_size; + uint8_t *ta_hdcp_start_addr; + + uint32_t ta_dtm_ucode_version; + uint32_t ta_dtm_ucode_size; + uint8_t *ta_dtm_start_addr; + + struct psp_asd_context asd_context; struct psp_xgmi_context xgmi_context; struct psp_ras_context ras; + struct psp_hdcp_context hdcp_context; + struct psp_dtm_context dtm_context; struct mutex mutex; + struct psp_memory_training_context mem_train_ctx; }; struct amdgpu_psp_funcs { @@ -221,8 +301,6 @@ struct amdgpu_psp_funcs { #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type)) #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type))) -#define psp_cmd_submit(psp, cmd_mc, fence_mc, index) \ - (psp)->funcs->cmd_submit((psp), (cmd_mc), (fence_mc), (index)) #define psp_compare_sram_data(psp, ucode, type) \ (psp)->funcs->compare_sram_data((psp), (ucode), (type)) #define psp_init_microcode(psp) \ @@ -251,6 +329,12 @@ struct amdgpu_psp_funcs { (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL) #define psp_rlc_autoload(psp) \ ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0) +#define psp_mem_training_init(psp) \ + ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0) +#define psp_mem_training_fini(psp) \ + ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0) +#define psp_mem_training(psp, ops) \ + ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) @@ -261,6 +345,9 @@ struct amdgpu_psp_funcs { ((psp)->funcs->ras_cure_posion ? 
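Like the other hooks in psp_funcs, the new mem_training macros above expand to 0 when an ASIC leaves the function pointer NULL, so callers need no per-ASIC checks; psp_resume() earlier in this patch relies on exactly that with PSP_MEM_TRAIN_RESUME. A hedged sketch of a caller (psp_trigger_mem_training is illustrative, not part of the patch):

static int psp_trigger_mem_training(struct psp_context *psp, bool resume)
{
	/* short retrain on resume, full DRAM training on cold boot */
	uint32_t ops = resume ? PSP_MEM_TRAIN_RESUME : PSP_MEM_TRAIN_COLD_BOOT;

	return psp_mem_training(psp, ops);
}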
\ (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL) +#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp)) +#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value)) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; @@ -279,10 +366,16 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable); +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rlc_autoload_start(struct psp_context *psp); extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, uint32_t value); +int psp_ring_cmd_submit(struct psp_context *psp, + uint64_t cmd_buf_mc_addr, + uint64_t fence_mc_addr, + int index); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 016ea274b955..04394c45aa03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -25,10 +25,13 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/reboot.h> +#include <linux/syscalls.h> #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" const char *ras_error_string[] = { "none", @@ -65,11 +68,16 @@ const char *ras_block_string[] = { /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr); -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr); +enum amdgpu_ras_retire_page_reservation { + AMDGPU_RAS_RETIRE_PAGE_RESERVED, + AMDGPU_RAS_RETIRE_PAGE_PENDING, + AMDGPU_RAS_RETIRE_PAGE_FAULT, +}; + +atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); + +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr); static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -189,6 +197,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } + /** * DOC: AMDGPU RAS debugfs control interface * @@ -208,31 +217,44 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * As their names indicate, inject operation will write the * value to the address. * - * Second member: struct ras_debug_if::op. + * The second member: struct ras_debug_if::op. * It has three kinds of operations. - * 0: disable RAS on the block. Take ::head as its data. - * 1: enable RAS on the block. Take ::head as its data. - * 2: inject errors on the block. Take ::inject as its data. + * + * - 0: disable RAS on the block. Take ::head as its data. + * - 1: enable RAS on the block. Take ::head as its data. + * - 2: inject errors on the block. Take ::inject as its data. * * How to use the interface? - * programs: - * copy the struct ras_debug_if in your codes and initialize it. - * write the struct to the control node. * - * bash: - * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl - * op: disable, enable, inject - * disable: only block is needed - * enable: block and error are needed - * inject: error, address, value are needed - * block: umc, smda, gfx, ......... 
- see ras_block_string[] for details - * error: ue, ce - * ue: multi_uncorrectable - * ce: single_correctable - * sub_block: sub block index, pass 0 if there is no sub block + * Programs + * + * Copy the struct ras_debug_if in your code and initialize it. + * Write the struct to the control node. + * + * Shells + * + * .. code-block:: bash + * + * echo op block [error [sub_block address value]] > .../ras/ras_ctrl + * + * Parameters: + * + * op: disable, enable, inject + * disable: only block is needed + * enable: block and error are needed + * inject: error, address, value are needed + * block: umc, sdma, gfx, ......... + * see ras_block_string[] for details + * error: ue, ce + * ue: multi_uncorrectable + * ce: single_correctable + * sub_block: + * sub block index, pass 0 if there is no sub block + * + * Here are some examples of bash commands: + * + * .. code-block:: bash * - * here are some examples for bash commands, * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl @@ -245,8 +267,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * For inject, please check corresponding err count at * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count * - * NOTE: operation is only allowed on blocks which are supported. - * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * .. note:: + * Operations are only allowed on blocks which are supported. + * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * to see which blocks support RAS on a particular asic. + * */ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) @@ -276,6 +301,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * break; } + /* umc ce/ue error injection for a bad page is not allowed */ + if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && + amdgpu_ras_check_bad_page(adev, data.inject.address)) { + DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", + data.inject.address); + break; + } + /* data.inject.address is offset instead of absolute gpu address */ ret = amdgpu_ras_error_inject(adev, &data.inject); break; @@ -290,6 +323,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * return size; } +/** + * DOC: AMDGPU RAS debugfs EEPROM table reset interface + * + * Some boards contain an EEPROM which is used to persistently store a list of + * bad pages which experienced ECC errors in vram. This interface provides + * a way to reset the EEPROM, e.g., after testing error injection. + * + * Usage: + * + * .. code-block:: bash + * + * echo 1 > ../ras/ras_eeprom_reset + * + * will reset the EEPROM table to 0 entries. + * + */ +static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + int ret; + + ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); + + return ret == 1 ? 
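For the "Programs" path in the DOC above, a userspace sketch that writes the raw struct rather than the string form. The struct layout below is an abridged copy for illustration only; copy the real definitions from the kernel's amdgpu_ras.h as the DOC instructs, and note that block 0 (umc) and the enable/type literals are assumptions taken from the documentation text:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct ras_common_if {		/* abridged; verify against amdgpu_ras.h */
	int block;
	int type;
	unsigned int sub_block_index;
	char name[32];
};

struct ras_inject_if {
	struct ras_common_if head;
	unsigned long long address;
	unsigned long long value;
};

struct ras_debug_if {
	union {
		struct ras_common_if head;
		struct ras_inject_if inject;
	};
	int op;
};

int main(void)
{
	struct ras_debug_if data;
	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

	if (fd < 0)
		return 1;
	memset(&data, 0, sizeof(data));
	data.op = 1;		/* 1 == enable, per the DOC above */
	data.head.block = 0;	/* assumed: AMDGPU_RAS_BLOCK__UMC */
	data.head.type = 4;	/* assumed: multi_uncorrectable */
	if (write(fd, &data, sizeof(data)) != sizeof(data))
		perror("ras_ctrl");
	close(fd);
	return 0;
}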
size : -EIO; +} + static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { .owner = THIS_MODULE, .read = NULL, @@ -297,6 +357,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_ras_debugfs_eeprom_write, + .llseek = default_llseek +}; + +/** + * DOC: AMDGPU RAS sysfs Error Count Interface + * + * It allows the user to read the error count for each IP block on the gpu through + * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count + * + * It outputs the multiple lines which report the uncorrected (ue) and corrected + * (ce) error counts. + * + * The format of one line is below, + * + * [ce|ue]: count + * + * Example: + * + * .. code-block:: bash + * + * ue: 0 + * ce: 1 + * + */ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, struct device_attribute *attr, char *buf) { @@ -354,7 +442,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, } /* return an obj equal to head, or the first when head is NULL */ -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, struct ras_common_if *head) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -475,15 +563,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) return 0; - ret = psp_ras_enable_features(&adev->psp, &info, enable); - if (ret) { - DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", - enable ? "enable":"disable", - ras_block_str(head->block), - ret); - if (ret == TA_RAS_STATUS__RESET_NEEDED) - return -EAGAIN; - return -EINVAL; + if (!amdgpu_ras_intr_triggered()) { + ret = psp_ras_enable_features(&adev->psp, &info, enable); + if (ret) { + DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", + enable ? 
"enable":"disable", + ras_block_str(head->block), + ret); + if (ret == TA_RAS_STATUS__RESET_NEEDED) + return -EAGAIN; + return -EINVAL; + } } /* setup the obj */ @@ -615,8 +705,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, adev->gfx.funcs->query_ras_error_count(adev, &err_data); break; case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub_funcs->query_ras_error_count) - adev->mmhub_funcs->query_ras_error_count(adev, &err_data); + if (adev->mmhub.funcs->query_ras_error_count) + adev->mmhub.funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__PCIE_BIF: + if (adev->nbio.funcs->query_ras_error_count) + adev->nbio.funcs->query_ras_error_count(adev, &err_data); break; default: break; @@ -628,12 +722,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; - if (err_data.ce_count) + if (err_data.ce_count) { dev_info(adev->dev, "%ld correctable errors detected in %s block\n", obj->err_data.ce_count, ras_block_str(info->head.block)); - if (err_data.ue_count) + } + if (err_data.ue_count) { dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", obj->err_data.ue_count, ras_block_str(info->head.block)); + } return 0; } @@ -664,6 +760,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, break; case AMDGPU_RAS_BLOCK__UMC: case AMDGPU_RAS_BLOCK__MMHUB: + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + case AMDGPU_RAS_BLOCK__PCIE_BIF: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; default: @@ -723,18 +821,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, static char *amdgpu_ras_badpage_flags_str(unsigned int flags) { switch (flags) { - case 0: + case AMDGPU_RAS_RETIRE_PAGE_RESERVED: return "R"; - case 1: + case AMDGPU_RAS_RETIRE_PAGE_PENDING: return "P"; - case 2: + case AMDGPU_RAS_RETIRE_PAGE_FAULT: default: return "F"; }; } -/* - * DOC: ras sysfs gpu_vram_bad_pages interface +/** + * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface * * It allows user to read the bad pages of vram on the gpu through * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages @@ -746,14 +844,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags) * * gpu pfn and gpu page size are printed in hex format. * flags can be one of below character, + * * R: reserved, this gpu page is reserved and not able to use. + * * P: pending for reserve, this gpu page is marked as bad, will be reserved - * in next window of page_reserve. + * in next window of page_reserve. + * * F: unable to reserve. this gpu page can't be reserved due to some reasons. * - * examples: - * 0x00000001 : 0x00001000 : R - * 0x00000002 : 0x00001000 : P + * Examples: + * + * .. code-block:: bash + * + * 0x00000001 : 0x00001000 : R + * 0x00000002 : 0x00001000 : P + * */ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, @@ -927,6 +1032,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) } /* sysfs end */ +/** + * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors + * + * Normally when there is an uncorrectable error, the driver will reset + * the GPU to recover. However, in the event of an unrecoverable error, + * the driver provides an interface to reboot the system automatically + * in that event. + * + * The following file in debugfs provides that interface: + * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot + * + * Usage: + * + * .. 
code-block:: bash + * + * echo true > .../ras/auto_reboot + * + */ /* debugfs begin */ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { @@ -934,8 +1057,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) struct drm_minor *minor = adev->ddev->primary; con->dir = debugfs_create_dir("ras", minor->debugfs_root); - con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_ctrl_ops); + debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, + adev, &amdgpu_ras_debugfs_ctrl_ops); + debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, + adev, &amdgpu_ras_debugfs_eeprom_ops); + + /* + * After one uncorrectable error happens, usually GPU recovery will + * be scheduled. But due to the known problem in GPU recovery failing + * to bring GPU back, below interface provides one direct way to + * user to reboot system automatically in such case within + * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine + * will never be called. + */ + debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, + &con->reboot); } void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, @@ -980,10 +1116,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_remove(adev, &obj->head); } - debugfs_remove(con->ent); - debugfs_remove(con->dir); + debugfs_remove_recursive(con->dir); con->dir = NULL; - con->ent = NULL; } /* debugfs end */ @@ -1188,15 +1322,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, for (; i < data->count; i++) { (*bps)[i] = (struct ras_badpage){ - .bp = data->bps[i].bp, + .bp = data->bps[i].retired_page, .size = AMDGPU_GPU_PAGE_SIZE, - .flags = 0, + .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; if (data->last_reserved <= i) - (*bps)[i].flags = 1; - else if (data->bps[i].bo == NULL) - (*bps)[i].flags = 2; + (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; + else if (data->bps_bo[i] == NULL) + (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; } *count = data->count; @@ -1214,105 +1348,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) atomic_set(&ras->in_recovery, 0); } -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr) -{ - /* no need to free it actually. 
*/ - amdgpu_bo_free_kernel(bo_ptr, NULL, NULL); - return 0; -} - -/* reserve vram with size@offset */ -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - struct amdgpu_bo *bo; - - if (bo_ptr) - *bo_ptr = NULL; - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; - - r = amdgpu_bo_create(adev, &bp, &bo); - if (r) - return -EINVAL; - - r = amdgpu_bo_reserve(bo, false); - if (r) - goto error_reserve; - - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } - - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx); - if (r) - goto error_pin; - - r = amdgpu_bo_pin_restricted(bo, - AMDGPU_GEM_DOMAIN_VRAM, - offset, - offset + size); - if (r) - goto error_pin; - - if (bo_ptr) - *bo_ptr = bo; - - amdgpu_bo_unreserve(bo); - return r; - -error_pin: - amdgpu_bo_unreserve(bo); -error_reserve: - amdgpu_bo_unref(&bo); - return r; -} - /* alloc/realloc bps array */ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, struct ras_err_handler_data *data, int pages) { unsigned int old_space = data->count + data->space_left; unsigned int new_space = old_space + pages; - unsigned int align_space = ALIGN(new_space, 1024); - void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); - - if (!tmp) + unsigned int align_space = ALIGN(new_space, 512); + void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + struct amdgpu_bo **bps_bo = + kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); + + if (!bps || !bps_bo) { + kfree(bps); + kfree(bps_bo); return -ENOMEM; + } if (data->bps) { - memcpy(tmp, data->bps, + memcpy(bps, data->bps, data->count * sizeof(*data->bps)); kfree(data->bps); } + if (data->bps_bo) { + memcpy(bps_bo, data->bps_bo, + data->count * sizeof(*data->bps_bo)); + kfree(data->bps_bo); + } - data->bps = tmp; + data->bps = bps; + data->bps_bo = bps_bo; data->space_left += align_space - old_space; return 0; } /* it deal with vram only. 
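The reallocation above grows two parallel arrays (the bad-page records and their reservation BOs) in lockstep, rounding capacity up in 512-entry steps. A worked instance of the sizing math, assuming the kernel's ALIGN() (round up to a multiple of a power of two):

/* count = 100, space_left = 28, pages = 5:
 *   old_space   = 100 + 28        = 128
 *   new_space   = 128 + 5         = 133
 *   align_space = ALIGN(133, 512) = 512
 *   space_left += 512 - 128, i.e. 384 free slots after the copy
 */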
*/ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, - unsigned long *bps, int pages) + struct eeprom_table_record *bps, int pages) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; - int i = pages; int ret = 0; if (!con || !con->eh_data || !bps || pages <= 0) @@ -1329,24 +1404,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, goto out; } - while (i--) - data->bps[data->count++].bp = bps[i]; - + memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); + data->count += pages; data->space_left -= pages; + out: mutex_unlock(&con->recovery_lock); return ret; } +/* + * write error record array to eeprom, the function should be + * protected by recovery_lock + */ +static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_err_handler_data *data; + struct amdgpu_ras_eeprom_control *control; + int save_count; + + if (!con || !con->eh_data) + return 0; + + control = &con->eeprom_control; + data = con->eh_data; + save_count = data->count - control->num_recs; + /* only new entries are saved */ + if (save_count > 0) + if (amdgpu_ras_eeprom_process_recods(control, + &data->bps[control->num_recs], + true, + save_count)) { + DRM_ERROR("Failed to save EEPROM table data!"); + return -EIO; + } + + return 0; +} + +/* + * read error record array in eeprom and reserve enough space for + * storing new bad pages + */ +static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_ras_eeprom_control *control = + &adev->psp.ras.ras->eeprom_control; + struct eeprom_table_record *bps = NULL; + int ret = 0; + + /* no bad page record, skip eeprom access */ + if (!control->num_recs) + return ret; + + bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); + if (!bps) + return -ENOMEM; + + if (amdgpu_ras_eeprom_process_recods(control, bps, false, + control->num_recs)) { + DRM_ERROR("Failed to load EEPROM table records!"); + ret = -EIO; + goto out; + } + + ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); + +out: + kfree(bps); + return ret; +} + +/* + * check if an address belongs to bad page + * + * Note: this check is only for umc block + */ +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_err_handler_data *data; + int i; + bool ret = false; + + if (!con || !con->eh_data) + return ret; + + mutex_lock(&con->recovery_lock); + data = con->eh_data; + if (!data) + goto out; + + addr >>= AMDGPU_GPU_PAGE_SHIFT; + for (i = 0; i < data->count; i++) + if (addr == data->bps[i].retired_page) { + ret = true; + goto out; + } + +out: + mutex_unlock(&con->recovery_lock); + return ret; +} + /* called in gpu recovery/init */ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; uint64_t bp; - struct amdgpu_bo *bo; - int i; + struct amdgpu_bo *bo = NULL; + int i, ret = 0; if (!con || !con->eh_data) return 0; @@ -1357,18 +1528,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) goto out; /* reserve vram at driver post stage. 
*/ for (i = data->last_reserved; i < data->count; i++) { - bp = data->bps[i].bp; + bp = data->bps[i].retired_page; - if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT, - PAGE_SIZE, &bo)) - DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp); + /* There are two cases where a reserve error should be ignored: + * 1) a ras bad page has been allocated (used by someone); + * 2) a ras bad page has been reserved (duplicate error injection + * for one page); + */ + if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL)) + DRM_WARN("RAS WARN: reserve vram for retired page %llx failed\n", bp); - data->bps[i].bo = bo; + data->bps_bo[i] = bo; data->last_reserved = i + 1; + bo = NULL; } + + /* continue to save bad pages to eeprom even if reserve_vram fails */ + ret = amdgpu_ras_save_bad_pages(adev); out: mutex_unlock(&con->recovery_lock); - return 0; + return ret; } /* called when driver unload */ @@ -1388,11 +1570,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) goto out; for (i = data->last_reserved - 1; i >= 0; i--) { - bo = data->bps[i].bo; + bo = data->bps_bo[i]; - amdgpu_ras_release_vram(adev, &bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); - data->bps[i].bo = bo; + data->bps_bo[i] = bo; data->last_reserved = i; } out: @@ -1400,41 +1582,54 @@ out: return 0; } -static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) -{ - /* TODO - * write the array to eeprom when SMU disabled. - */ - return 0; -} - -static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) -{ - /* TODO - * read the array to eeprom when SMU disabled. - */ - return 0; -} - -static int amdgpu_ras_recovery_init(struct amdgpu_device *adev) +int amdgpu_ras_recovery_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data **data = &con->eh_data; + struct ras_err_handler_data **data; + int ret; - *data = kmalloc(sizeof(**data), - GFP_KERNEL|__GFP_ZERO); - if (!*data) - return -ENOMEM; + if (con) + data = &con->eh_data; + else + return 0; + + *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); + if (!*data) { + ret = -ENOMEM; + goto out; + } mutex_init(&con->recovery_lock); INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); atomic_set(&con->in_recovery, 0); con->adev = adev; - amdgpu_ras_load_bad_pages(adev); - amdgpu_ras_reserve_bad_pages(adev); + ret = amdgpu_ras_eeprom_init(&con->eeprom_control); + if (ret) + goto free; + + if (con->eeprom_control.num_recs) { + ret = amdgpu_ras_load_bad_pages(adev); + if (ret) + goto free; + ret = amdgpu_ras_reserve_bad_pages(adev); + if (ret) + goto release; + } return 0; + +release: + amdgpu_ras_release_bad_pages(adev); +free: + kfree((*data)->bps); + kfree((*data)->bps_bo); + kfree(*data); + con->eh_data = NULL; +out: + DRM_WARN("Failed to initialize ras recovery!\n"); + + return ret; } static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) @@ -1442,13 +1637,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data = con->eh_data; + /* recovery_init failed to init it, so fini has nothing to do */ + if (!data) + return 0; + cancel_work_sync(&con->recovery_work); - amdgpu_ras_save_bad_pages(adev); amdgpu_ras_release_bad_pages(adev); mutex_lock(&con->recovery_lock); con->eh_data = NULL; kfree(data->bps); + kfree(data->bps_bo); kfree(data); mutex_unlock(&con->recovery_lock); @@ -1485,7 +1684,8 @@ static void 
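The reserve loop above depends on amdgpu_bo_create_kernel_at() pinning a kernel BO at an exact VRAM offset, so the retired page can never be handed out again; failure is tolerated because the page may already be allocated or already reserved. Reduced to a single page, the pattern looks like this sketch (ras_pin_retired_page is illustrative, not part of the patch):

static void ras_pin_retired_page(struct amdgpu_device *adev, uint64_t pfn)
{
	struct amdgpu_bo *bo = NULL;

	/* pin one GPU page at a fixed VRAM offset; ignore expected failures */
	if (amdgpu_bo_create_kernel_at(adev, pfn << AMDGPU_GPU_PAGE_SHIFT,
				       AMDGPU_GPU_PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL))
		DRM_WARN("could not reserve retired page 0x%llx\n", pfn);
}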
amdgpu_ras_check_supported(struct amdgpu_device *adev, *supported = 0; if (amdgpu_sriov_vf(adev) || - adev->asic_type != CHIP_VEGA20) + (adev->asic_type != CHIP_VEGA20 && + adev->asic_type != CHIP_ARCTURUS)) return; if (adev->is_atom_fw && @@ -1500,6 +1700,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int r; if (con) return 0; @@ -1527,31 +1728,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev) /* Might need get this flag from vbios. */ con->flags = RAS_DEFAULT_FLAGS; - if (amdgpu_ras_recovery_init(adev)) - goto recovery_out; + if (adev->nbio.funcs->init_ras_controller_interrupt) { + r = adev->nbio.funcs->init_ras_controller_interrupt(adev); + if (r) + return r; + } + + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { + r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); + if (r) + return r; + } amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; if (amdgpu_ras_fs_init(adev)) goto fs_out; - /* ras init for each ras block */ - if (adev->umc.funcs->ras_init) - adev->umc.funcs->ras_init(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); return 0; fs_out: - amdgpu_ras_recovery_fini(adev); -recovery_out: amdgpu_ras_set_context(adev, NULL); kfree(con); return -EINVAL; } +/* helper function to handle common stuff in ip late init phase */ +int amdgpu_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_fs_if *fs_info, + struct ras_ih_if *ih_info) +{ + int r; + + /* disable RAS feature per IP block if it is not supported */ + if (!amdgpu_ras_is_supported(adev, ras_block->block)) { + amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); + return 0; + } + + r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); + if (r) { + if (r == -EAGAIN) { + /* request gpu reset. will run again */ + amdgpu_ras_request_reset_on_boot(adev, + ras_block->block); + return 0; + } else if (adev->in_suspend || adev->in_gpu_reset) { + /* in resume phase, if fail to enable ras, + * clean up all ras fs nodes, and disable ras */ + goto cleanup; + } else + return r; + } + + /* in resume phase, no need to create ras fs node */ + if (adev->in_suspend || adev->in_gpu_reset) + return 0; + + if (ih_info->cb) { + r = amdgpu_ras_interrupt_add_handler(adev, ih_info); + if (r) + goto interrupt; + } + + amdgpu_ras_debugfs_create(adev, fs_info); + + r = amdgpu_ras_sysfs_create(adev, fs_info); + if (r) + goto sysfs; + + return 0; +cleanup: + amdgpu_ras_sysfs_remove(adev, ras_block); +sysfs: + amdgpu_ras_debugfs_remove(adev, ras_block); + if (ih_info->cb) + amdgpu_ras_interrupt_remove_handler(adev, ih_info); +interrupt: + amdgpu_ras_feature_enable(adev, ras_block, 0); + return r; +} + +/* helper function to remove ras fs node and interrupt handler */ +void amdgpu_ras_late_fini(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_ih_if *ih_info) +{ + if (!ras_block || !ih_info) + return; + + amdgpu_ras_sysfs_remove(adev, ras_block); + amdgpu_ras_debugfs_remove(adev, ras_block); + if (ih_info->cb) + amdgpu_ras_interrupt_remove_handler(adev, ih_info); + amdgpu_ras_feature_enable(adev, ras_block, 0); +} + /* do some init work after IP late init as dependence. * and it runs in resume/gpu reset/booting up cases. 
*/ @@ -1645,3 +1921,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) return 0; } + +void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) +{ + uint32_t hw_supported, supported; + + amdgpu_ras_check_supported(adev, &hw_supported, &supported); + if (!hw_supported) + return; + + if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { + DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); + + amdgpu_ras_reset_gpu(adev, false); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6c76bb2a6843..d4ade4739245 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -317,8 +317,6 @@ struct amdgpu_ras { struct list_head head; /* debugfs */ struct dentry *dir; - /* debugfs ctrl */ - struct dentry *ent; /* sysfs */ struct device_attribute features_attr; struct bin_attribute badpages_attr; @@ -334,7 +332,7 @@ struct amdgpu_ras { struct mutex recovery_lock; uint32_t flags; - + bool reboot; struct amdgpu_ras_eeprom_control eeprom_control; }; @@ -347,15 +345,14 @@ struct ras_err_data { unsigned long ue_count; unsigned long ce_count; unsigned long err_addr_cnt; - uint64_t *err_addr; + struct eeprom_table_record *err_addr; }; struct ras_err_handler_data { - /* point to bad pages array */ - struct { - unsigned long bp; - struct amdgpu_bo *bo; - } *bps; + /* point to bad page records array */ + struct eeprom_table_record *bps; + /* point to reserved bo array */ + struct amdgpu_bo **bps_bo; /* the count of entries */ int count; /* the space can place new entries */ @@ -365,7 +362,7 @@ struct ras_err_handler_data { }; typedef int (*ras_ih_cb)(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry); struct ras_ih_data { @@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, return ras && (ras->supported & (1 << block)); } +int amdgpu_ras_recovery_init(struct amdgpu_device *adev); int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, unsigned int block); @@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, - unsigned long *bps, int pages); + struct eeprom_table_record *bps, int pages); int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev); @@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev, { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + /* save bad page to eeprom before gpu reset, + * i2c may be unstable in gpu reset + */ + if (in_task()) + amdgpu_ras_reserve_bad_pages(adev); + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) schedule_work(&ras->recovery_work); return 0; @@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) { int amdgpu_ras_init(struct amdgpu_device *adev); int amdgpu_ras_fini(struct amdgpu_device *adev); int amdgpu_ras_pre_fini(struct amdgpu_device *adev); +int amdgpu_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_fs_if *fs_info, + struct ras_ih_if *ih_info); +void amdgpu_ras_late_fini(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_ih_if *ih_info); int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable); @@ -599,4 +610,22 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct 
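amdgpu_ras_global_ras_isr() above latches amdgpu_ras_in_intr exactly once via atomic_cmpxchg(), and hot paths consult it through amdgpu_ras_intr_triggered() (added to amdgpu_ras.h below) to stay off PSP/i2c while the fatal ATHUB event is handled; that is the guard added to amdgpu_ras_feature_enable() earlier in this patch. The consumer side is just:

	if (amdgpu_ras_intr_triggered())
		return 0;	/* fatal RAS event in flight, skip TA traffic */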
ras_dispatch_if *info); + +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, + struct ras_common_if *head); + +extern atomic_t amdgpu_ras_in_intr; + +static inline bool amdgpu_ras_intr_triggered(void) +{ + return !!atomic_read(&amdgpu_ras_in_intr); +} + +static inline void amdgpu_ras_intr_cleared(void) +{ + atomic_set(&amdgpu_ras_in_intr, 0); +} + +void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 8a32b5c93778..2a8e04895595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -27,7 +27,8 @@ #include <linux/bits.h> #include "smu_v11_0_i2c.h" -#define EEPROM_I2C_TARGET_ADDR 0xA0 +#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8 +#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0 /* * The 2 macros bellow represent the actual size in bytes that @@ -83,7 +84,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, { int ret = 0; struct i2c_msg msg = { - .addr = EEPROM_I2C_TARGET_ADDR, + .addr = 0, .flags = 0, .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE, .buf = buff, @@ -93,6 +94,8 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, *(uint16_t *)buff = EEPROM_HDR_START; __encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE); + msg.addr = control->i2c_address; + ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); if (ret < 1) DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret); @@ -100,7 +103,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, return ret; } -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control); + + +static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) +{ + int i; + uint32_t tbl_sum = 0; + + /* Header checksum, skip checksum field in the calculation */ + for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) + tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); + + return tbl_sum; +} + +static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, + int num) +{ + int i, j; + uint32_t tbl_sum = 0; + + /* Records checksum */ + for (i = 0; i < num; i++) { + struct eeprom_table_record *record = &records[i]; + + for (j = 0; j < sizeof(*record); j++) { + tbl_sum += *(((unsigned char *)record) + j); + } + } + + return tbl_sum; +} + +static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num) +{ + return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); +} + +/* Checksum = 256 -((sum of all table entries) mod 256) */ +static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num, + uint32_t old_hdr_byte_sum) +{ + /* + * This will update the table sum with new records. + * + * TODO: What happens when the EEPROM table is to be wrapped around + * and old records from start will get overridden. 
+ */ + + /* need to recalculate updated header byte sum */ + control->tbl_byte_sum -= old_hdr_byte_sum; + control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); + + control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); +} + +/* table sum mod 256 + checksum must equal 256 */ +static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num) +{ + control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); + + if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { + DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); + return false; + } + + return true; +} + +int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) +{ + unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + int ret = 0; + + mutex_lock(&control->tbl_mutex); + + hdr->header = EEPROM_TABLE_HDR_VAL; + hdr->version = EEPROM_TABLE_VER; + hdr->first_rec_offset = EEPROM_RECORD_START; + hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; + + control->tbl_byte_sum = 0; + __update_tbl_checksum(control, NULL, 0, 0); + control->next_addr = EEPROM_RECORD_START; + + ret = __update_table_header(control, buff); + + mutex_unlock(&control->tbl_mutex); + + return ret; + +} int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) { @@ -109,7 +206,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; struct i2c_msg msg = { - .addr = EEPROM_I2C_TARGET_ADDR, + .addr = 0, .flags = I2C_M_RD, .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE, .buf = buff, @@ -119,9 +216,15 @@ switch (adev->asic_type) { case CHIP_VEGA20: + control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20; ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); break; + case CHIP_ARCTURUS: + control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS; + ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); + break; + default: return 0; } @@ -131,6 +234,8 @@ return ret; } + msg.addr = control->i2c_address; + /* Read/Create table header from EEPROM address 0 */ ret = i2c_transfer(&control->eeprom_accessor, &msg, 1); if (ret < 1) { @@ -143,25 +248,18 @@ if (hdr->header == EEPROM_TABLE_HDR_VAL) { control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE; + control->tbl_byte_sum = __calc_hdr_byte_sum(control); + control->next_addr = EEPROM_RECORD_START; + DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", control->num_recs); } else { DRM_INFO("Creating new EEPROM table"); - hdr->header = EEPROM_TABLE_HDR_VAL; - hdr->version = EEPROM_TABLE_VER; - hdr->first_rec_offset = EEPROM_RECORD_START; - hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; - - adev->psp.ras.ras->eeprom_control.tbl_byte_sum = - __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control); - ret = __update_table_header(control, buff); + ret = amdgpu_ras_eeprom_reset_table(control); } - /* Start inserting records from here */ - adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START; - return ret == 1 ? 
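The checksum convention above stores 256 - (byte_sum % 256) in the header, so a table validates when checksum + (byte_sum % 256) == 256. Worked instance: a byte sum of 0x1337 (4919) gives 4919 % 256 = 55, a stored checksum of 201, and 201 + 55 == 256. The invariant, spelled out as a sketch (eeprom_csum_ok is illustrative, not part of the patch):

static bool eeprom_csum_ok(uint32_t byte_sum, uint32_t checksum)
{
	/* same test as __validate_tbl_checksum() above */
	return checksum + (byte_sum % 256) == 256;
}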
0 : -EIO; } @@ -173,6 +271,9 @@ void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control) case CHIP_VEGA20: smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor); break; + case CHIP_ARCTURUS: + smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor); + break; default: return; @@ -226,8 +327,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); i += 6; - buff[i++] = record->mem_channel; - buff[i++] = record->mcumc_id; + record->mem_channel = buff[i++]; + record->mcumc_id = buff[i++]; memcpy(&tmp, buff + i, 6); record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff); @@ -266,87 +367,18 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address) return curr_address; } - -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) -{ - int i; - uint32_t tbl_sum = 0; - - /* Header checksum, skip checksum field in the calculation */ - for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) - tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); - - return tbl_sum; -} - -static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, - int num) -{ - int i, j; - uint32_t tbl_sum = 0; - - /* Records checksum */ - for (i = 0; i < num; i++) { - struct eeprom_table_record *record = &records[i]; - - for (j = 0; j < sizeof(*record); j++) { - tbl_sum += *(((unsigned char *)record) + j); - } - } - - return tbl_sum; -} - -static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) -{ - return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); -} - -/* Checksum = 256 -((sum of all table entries) mod 256) */ -static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num, - uint32_t old_hdr_byte_sum) -{ - /* - * This will update the table sum with new records. - * - * TODO: What happens when the EEPROM table is to be wrapped around - * and old records from start will get overridden. 
- */ - - /* need to recalculate updated header byte sum */ - control->tbl_byte_sum -= old_hdr_byte_sum; - control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); - - control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); -} - -/* table sum mod 256 + checksum must equals 256 */ -static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) -{ - control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); - - if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { - DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); - return false; - } - - return true; -} - int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, bool write, int num) { int i, ret = 0; - struct i2c_msg *msgs; - unsigned char *buffs; + struct i2c_msg *msgs, *msg; + unsigned char *buffs, *buff; + struct eeprom_table_record *record; struct amdgpu_device *adev = to_amdgpu_device(control); - if (adev->asic_type != CHIP_VEGA20) + if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS) return 0; buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE, @@ -373,9 +405,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, * 256b */ for (i = 0; i < num; i++) { - unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - struct eeprom_table_record *record = &records[i]; - struct i2c_msg *msg = &msgs[i]; + buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; + record = &records[i]; + msg = &msgs[i]; control->next_addr = __correct_eeprom_dest_address(control->next_addr); @@ -383,8 +415,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, * Update bits 16,17 of EEPROM address in I2C address by setting them * to bits 1,2 of Device address byte */ - msg->addr = EEPROM_I2C_TARGET_ADDR | - ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15); + msg->addr = control->i2c_address | + ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15); msg->flags = write ? 
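Records sit behind a 2-byte in-band EEPROM address, so offset bits 16-17 have to travel in the I2C device-address byte instead: shifting the masked offset right by 15 lands offset bit 16 on device-address bit 1 and bit 17 on bit 2, matching the comment in the hunk above. As a sketch (eeprom_msg_addr is illustrative, and EEPROM_ADDR_MSB_MASK is assumed to select bits 17:16 per that comment):

static uint16_t eeprom_msg_addr(uint16_t i2c_address, uint32_t next_addr)
{
	/* EEPROM offset bit 16 -> device address bit 1, bit 17 -> bit 2 */
	return i2c_address | ((next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
}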
0 : I2C_M_RD; msg->len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE; msg->buf = buff; @@ -415,8 +447,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, if (!write) { for (i = 0; i < num; i++) { - unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - struct eeprom_table_record *record = &records[i]; + buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; + record = &records[i]; __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 41f3fcb9a29b..ca78f812d436 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -50,6 +50,7 @@ struct amdgpu_ras_eeprom_control { struct mutex tbl_mutex; bool bus_locked; uint32_t tbl_byte_sum; + uint16_t i2c_address; // 8-bit represented address }; /* @@ -79,6 +80,7 @@ struct eeprom_table_record { int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control); +int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index c8793e6cc3c5..6373bfb47d55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) */ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) { - volatile u32 *dst_ptr; u32 dws; int r; /* allocate clear state block */ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) return r; } - /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 5c13c503e61f..6010999d9020 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -23,6 +23,7 @@ #include "amdgpu.h" #include "amdgpu_sdma.h" +#include "amdgpu_ras.h" #define AMDGPU_CSA_SDMA_SIZE 64 /* SDMA CSA reside in the 3rd page of CSA */ @@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, return csa_mc_addr; } + +int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, + void *ras_ih_info) +{ + int r, i; + struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; + struct ras_fs_if fs_info = { + .sysfs_name = "sdma_err_count", + .debugfs_name = "sdma_err_inject", + }; + + if (!ih_info) + return -EINVAL; + + if (!adev->sdma.ras_if) { + adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->sdma.ras_if) + return -ENOMEM; + adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA; + adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + 
adev->sdma.ras_if->sub_block_index = 0; + strcpy(adev->sdma.ras_if->name, "sdma"); + } + fs_info.head = ih_info->head = *adev->sdma.ras_if; + + r = amdgpu_ras_late_init(adev, adev->sdma.ras_if, + &fs_info, ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + if (r) + goto late_fini; + } + } else { + r = 0; + goto free; + } + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info); +free: + kfree(adev->sdma.ras_if); + adev->sdma.ras_if = NULL; + return r; +} + +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && + adev->sdma.ras_if) { + struct ras_common_if *ras_if = adev->sdma.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + /* the cb member will not be used by + * amdgpu_ras_interrupt_remove_handler, init it only + * to cheat the check in ras_late_fini + */ + .cb = amdgpu_sdma_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + amdgpu_ras_reset_gpu(adev, 0); + + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->sdma.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index a9ae0d8a0589..761ff8be6314 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -104,4 +104,13 @@ struct amdgpu_sdma_instance * amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid); +int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, + void *ras_ih_info); +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev); +int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 95e5e93edd18..a09b6b9c27d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -129,7 +129,8 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep, * Tries to add the fence to an existing hash entry. Returns true when an entry * was found, false otherwise. 
*/ -static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit) +static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, + bool explicit) { struct amdgpu_sync_entry *e; @@ -151,19 +152,18 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, * amdgpu_sync_fence - remember to sync to this fence * * @sync: sync object to add fence to - * @fence: fence to sync to + * @f: fence to sync to + * @explicit: if this is an explicit dependency * + * Add the fence to the sync object. */ -int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f, bool explicit) +int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, + bool explicit) { struct amdgpu_sync_entry *e; if (!f) return 0; - if (amdgpu_sync_same_dev(adev, f) && - amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM) - amdgpu_sync_keep_later(&sync->last_vm_update, f); if (amdgpu_sync_add_later(sync, f, explicit)) return 0; @@ -180,6 +180,24 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, } /** + * amdgpu_sync_vm_fence - remember to sync to this VM fence + * + * @adev: amdgpu device + * @sync: sync object to add fence to + * @fence: the VM fence to add + * + * Add the fence to the sync object and remember it as VM update. + */ +int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) +{ + if (!fence) + return 0; + + amdgpu_sync_keep_later(&sync->last_vm_update, fence); + return amdgpu_sync_fence(sync, fence, false); +} + +/** * amdgpu_sync_resv - sync to a reservation object * * @sync: sync object to add fences from reservation object to @@ -204,7 +222,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, /* always sync to the exclusive fence */ f = dma_resv_get_excl(resv); - r = amdgpu_sync_fence(adev, sync, f, false); + r = amdgpu_sync_fence(sync, f, false); flist = dma_resv_get_list(resv); if (!flist || r) @@ -222,13 +240,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, continue; if (amdgpu_sync_same_dev(adev, f)) { - /* VM updates are only interesting - * for other VM updates and moves. 
+ /* VM updates only sync with moves but not with user + * command submissions or KFD eviction fences */ - if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) && - (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) && - ((owner == AMDGPU_FENCE_OWNER_VM) != - (fence_owner == AMDGPU_FENCE_OWNER_VM))) + if (owner == AMDGPU_FENCE_OWNER_VM && + fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) continue; /* Ignore fence from the same owner and explicit one as @@ -239,7 +255,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, continue; } - r = amdgpu_sync_fence(adev, sync, f, false); + r = amdgpu_sync_fence(sync, f, false); if (r) break; } @@ -340,7 +356,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone) hash_for_each_safe(source->fences, i, tmp, e, node) { f = e->fence; if (!dma_fence_is_signaled(f)) { - r = amdgpu_sync_fence(NULL, clone, f, e->explicit); + r = amdgpu_sync_fence(clone, f, e->explicit); if (r) return r; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index b5f1778a2319..d62c2b81d92b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -40,8 +40,9 @@ struct amdgpu_sync { }; void amdgpu_sync_create(struct amdgpu_sync *sync); -int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f, bool explicit); +int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f, + bool explicit); +int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, @@ -49,7 +50,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, bool explicit_sync); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); -struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit); +struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, + bool *explicit); int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone); int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); void amdgpu_sync_free(struct amdgpu_sync *sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b66d29d5ffa2..b158230af8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 77674a7b9616..63e734a125fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __field(unsigned int, context) __field(unsigned int, seqno) __field(struct dma_fence *, fence) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring,
to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); TRACE_EVENT(amdgpu_sched_run_job, @@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __field(unsigned int, context) __field(unsigned int, seqno) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs, TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint64_t flags), - TP_ARGS(pe, addr, count, incr, flags), + uint32_t incr, uint64_t flags, bool direct), + TP_ARGS(pe, addr, count, incr, flags, direct), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) __field(u64, flags) + __field(bool, direct) ), TP_fast_assign( @@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes, __entry->count = count; __entry->incr = incr; __entry->flags = flags; + __entry->direct = direct; ), - TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u", - __entry->pe, __entry->addr, __entry->incr, - __entry->flags, __entry->count) + TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, " + "direct=%d", __entry->pe, __entry->addr, __entry->incr, + __entry->flags, __entry->count, __entry->direct) ); TRACE_EVENT(amdgpu_vm_copy_ptes, - TP_PROTO(uint64_t pe, uint64_t src, unsigned count), - TP_ARGS(pe, src, count), + TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct), + TP_ARGS(pe, src, count, direct), TP_STRUCT__entry( __field(u64, pe) __field(u64, src) __field(u32, count) + __field(bool, direct) ), TP_fast_assign( __entry->pe = pe; __entry->src = src; __entry->count = count; + __entry->direct = direct; ), - TP_printk("pe=%010Lx, src=%010Lx, count=%u", - __entry->pe, __entry->src, __entry->count) + TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d", + __entry->pe, __entry->src, __entry->count, + __entry->direct) ); TRACE_EVENT(amdgpu_vm_flush, @@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence), TP_ARGS(sched_job, fence), TP_STRUCT__entry( - __field(const char *,name) + __string(ring, sched_job->base.sched->name) __field(uint64_t, id) __field(struct dma_fence *, fence) __field(uint64_t, ctx) @@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, ), TP_fast_assign( - __entry->name = sched_job->base.sched->name; + __assign_str(ring, sched_job->base.sched->name) __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), 
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", - __entry->name, __entry->id, + __get_str(ring), __entry->id, __entry->fence, __entry->ctx, __entry->seqno) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c19100ced040..445de594c214 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -35,6 +35,7 @@ #include <linux/hmm.h> #include <linux/pagemap.h> #include <linux/sched/task.h> +#include <linux/sched/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/swap.h> @@ -55,6 +56,7 @@ #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_sdma.h" +#include "amdgpu_ras.h" #include "bif/bif_4_1_d.h" static int amdgpu_map_buffer(struct ttm_buffer_object *bo, @@ -485,15 +487,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { - struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; - adev = amdgpu_ttm_adev(bo->bdev); - /* create space/pages for new_mem in GTT space */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; @@ -544,15 +543,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { - struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; - adev = amdgpu_ttm_adev(bo->bdev); - /* make space in GTT for old_mem buffer */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; @@ -774,6 +770,20 @@ struct amdgpu_ttm_tt { #endif }; +#ifdef CONFIG_DRM_AMDGPU_USERPTR +/* flags used by HMM internal, not related to CPU/GPU PTE flags */ +static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { + (1 << 0), /* HMM_PFN_VALID */ + (1 << 1), /* HMM_PFN_WRITE */ + 0 /* HMM_PFN_DEVICE_PRIVATE */ +}; + +static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { + 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */ + 0, /* HMM_PFN_NONE */ + 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */ +}; + /** * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user * memory and start HMM tracking CPU page table update @@ -781,85 +791,89 @@ struct amdgpu_ttm_tt { * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only * once afterwards to stop HMM tracking */ -#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) - -#define MAX_RETRY_HMM_RANGE_FAULT 16 - int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) { - struct hmm_mirror *mirror = bo->mn ? 
&bo->mn->mirror : NULL; struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm; - struct mm_struct *mm = gtt->usertask->mm; unsigned long start = gtt->userptr; struct vm_area_struct *vma; struct hmm_range *range; + unsigned long timeout; + struct mm_struct *mm; unsigned long i; - uint64_t *pfns; int r = 0; - if (!mm) /* Happens during process shutdown */ - return -ESRCH; - - if (unlikely(!mirror)) { - DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n"); - r = -EFAULT; - goto out; + mm = bo->notifier.mm; + if (unlikely(!mm)) { + DRM_DEBUG_DRIVER("BO is not registered?\n"); + return -EFAULT; } - vma = find_vma(mm, start); - if (unlikely(!vma || start < vma->vm_start)) { - r = -EFAULT; - goto out; - } - if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) && - vma->vm_file)) { - r = -EPERM; - goto out; - } + /* Another get_user_pages is running at the same time?? */ + if (WARN_ON(gtt->range)) + return -EFAULT; + + if (!mmget_not_zero(mm)) /* Happens during process shutdown */ + return -ESRCH; range = kzalloc(sizeof(*range), GFP_KERNEL); if (unlikely(!range)) { r = -ENOMEM; goto out; } + range->notifier = &bo->notifier; + range->flags = hmm_range_flags; + range->values = hmm_range_values; + range->pfn_shift = PAGE_SHIFT; + range->start = bo->notifier.interval_tree.start; + range->end = bo->notifier.interval_tree.last + 1; + range->default_flags = hmm_range_flags[HMM_PFN_VALID]; + if (!amdgpu_ttm_tt_is_readonly(ttm)) + range->default_flags |= range->flags[HMM_PFN_WRITE]; - pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL); - if (unlikely(!pfns)) { + range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns), + GFP_KERNEL); + if (unlikely(!range->pfns)) { r = -ENOMEM; goto out_free_ranges; } - amdgpu_hmm_init_range(range); - range->default_flags = range->flags[HMM_PFN_VALID]; - range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ? - 0 : range->flags[HMM_PFN_WRITE]; - range->pfn_flags_mask = 0; - range->pfns = pfns; - range->start = start; - range->end = start + ttm->num_pages * PAGE_SIZE; - - hmm_range_register(range, mirror); + down_read(&mm->mmap_sem); + vma = find_vma(mm, start); + if (unlikely(!vma || start < vma->vm_start)) { + r = -EFAULT; + goto out_unlock; + } + if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) && + vma->vm_file)) { + r = -EPERM; + goto out_unlock; + } + up_read(&mm->mmap_sem); + timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); - /* - * Just wait for range to be valid, safe to ignore return value as we - * will use the return value of hmm_range_fault() below under the - * mmap_sem to ascertain the validity of the range. - */ - hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT); +retry: + range->notifier_seq = mmu_interval_read_begin(&bo->notifier); down_read(&mm->mmap_sem); r = hmm_range_fault(range, 0); up_read(&mm->mmap_sem); - - if (unlikely(r < 0)) + if (unlikely(r <= 0)) { + /* + * FIXME: This timeout should encompass the retry from + * mmu_interval_read_retry() as well. 
+ */ + if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout)) + goto retry; goto out_free_pfns; + } for (i = 0; i < ttm->num_pages; i++) { - pages[i] = hmm_device_entry_to_page(range, pfns[i]); + /* FIXME: The pages cannot be touched outside the notifier_lock */ + pages[i] = hmm_device_entry_to_page(range, range->pfns[i]); if (unlikely(!pages[i])) { pr_err("Page fault failed for pfn[%lu] = 0x%llx\n", - i, pfns[i]); + i, range->pfns[i]); r = -ENOMEM; goto out_free_pfns; @@ -867,15 +881,18 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) } gtt->range = range; + mmput(mm); return 0; +out_unlock: + up_read(&mm->mmap_sem); out_free_pfns: - hmm_range_unregister(range); - kvfree(pfns); + kvfree(range->pfns); out_free_ranges: kfree(range); out: + mmput(mm); return r; } @@ -900,15 +917,18 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) "No user pages to check\n"); if (gtt->range) { - r = hmm_range_valid(gtt->range); - hmm_range_unregister(gtt->range); - + /* + * FIXME: Must always hold notifier_lock for this, and must + * not ignore the return code. + */ + r = mmu_interval_read_retry(gtt->range->notifier, + gtt->range->notifier_seq); kvfree(gtt->range->pfns); kfree(gtt->range); gtt->range = NULL; } - return r; + return !r; } #endif @@ -989,10 +1009,18 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) - if (gtt->range && - ttm->pages[0] == hmm_device_entry_to_page(gtt->range, - gtt->range->pfns[0])) - WARN_ONCE(1, "Missing get_user_page_done\n"); + if (gtt->range) { + unsigned long i; + + for (i = 0; i < ttm->num_pages; i++) { + if (ttm->pages[i] != + hmm_device_entry_to_page(gtt->range, + gtt->range->pfns[i])) + break; + } + + WARN((i == ttm->num_pages), "Missing get_user_page_done\n"); + } #endif } @@ -1219,11 +1247,8 @@ static struct ttm_backend_func amdgpu_backend_func = { static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { - struct amdgpu_device *adev; struct amdgpu_ttm_tt *gtt; - adev = amdgpu_ttm_adev(bo->bdev); - gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; @@ -1497,11 +1522,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, struct dma_fence *f; int i; - /* Don't evict VM page tables while they are busy, otherwise we can't - * cleanly handle page faults. - */ if (bo->type == ttm_bo_type_kernel && - !dma_resv_test_signaled_rcu(bo->base.resv, true)) + !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) return false; /* If bo is a KFD BO, check if the bo belongs to the current process. 
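The amdgpu_ttm.c hunks above replace the old hmm_mirror registration with the mmu_interval_notifier API: the driver samples a sequence count with mmu_interval_read_begin(), faults the pages under mmap_sem with hmm_range_fault(), and only accepts the result if mmu_interval_read_retry() confirms that no invalidation ran in between. The following is a minimal, driver-agnostic sketch of that begin/fault/retry loop; it is an illustration only, and my_fault_pages() and sketch_get_user_pages() are hypothetical names, not kernel APIs.

#include <linux/mmu_notifier.h>
#include <linux/hmm.h>

/* Hypothetical stand-in for the mmap_sem-protected hmm_range_fault()
 * call in the hunk above; the real code also retries -EBUSY within a
 * timeout, which is elided here for brevity. */
static int my_fault_pages(struct hmm_range *range)
{
        long ret;

        ret = hmm_range_fault(range, 0);
        return ret <= 0 ? -EFAULT : 0;
}

static int sketch_get_user_pages(struct mmu_interval_notifier *notifier,
                                 struct hmm_range *range)
{
        int r;

        do {
                /* Sample the invalidation sequence count. */
                range->notifier_seq = mmu_interval_read_begin(notifier);

                r = my_fault_pages(range);
                if (r)
                        return r;

                /* If an invalidation ran while we faulted, the pfns are
                 * stale: sample the sequence count again and refault.
                 * The pages must only be used under the lock taken by
                 * the notifier callback (see the FIXMEs above). */
        } while (mmu_interval_read_retry(notifier, range->notifier_seq));

        return 0;
}

The same begin/retry pairing is what amdgpu_ttm_tt_get_user_pages_done() checks with the saved notifier_seq before the pages are actually used.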
@@ -1656,81 +1678,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) */ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - u64 vram_size = adev->gmc.visible_vram_size; - u64 offset = adev->fw_vram_usage.start_offset; - u64 size = adev->fw_vram_usage.size; - struct amdgpu_bo *bo; - - memset(&bp, 0, sizeof(bp)); - bp.size = adev->fw_vram_usage.size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; + uint64_t vram_size = adev->gmc.visible_vram_size; + adev->fw_vram_usage.va = NULL; adev->fw_vram_usage.reserved_bo = NULL; - if (adev->fw_vram_usage.size > 0 && - adev->fw_vram_usage.size <= vram_size) { + if (adev->fw_vram_usage.size == 0 || + adev->fw_vram_usage.size > vram_size) + return 0; - r = amdgpu_bo_create(adev, &bp, - &adev->fw_vram_usage.reserved_bo); - if (r) - goto error_create; + return amdgpu_bo_create_kernel_at(adev, + adev->fw_vram_usage.start_offset, + adev->fw_vram_usage.size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->fw_vram_usage.reserved_bo, + &adev->fw_vram_usage.va); +} - r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); - if (r) - goto error_reserve; +/* + * Memory training reservation functions + */ - /* remove the original mem node and create a new one at the - * request position - */ - bo = adev->fw_vram_usage.reserved_bo; - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } +/** + * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram + * + * @adev: amdgpu_device pointer + * + * free memory training reserved vram if it has been reserved. + */ +static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) +{ + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, - &bo->tbo.mem, &ctx); - if (r) - goto error_pin; + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); + ctx->c2p_bo = NULL; - r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, - AMDGPU_GEM_DOMAIN_VRAM, - adev->fw_vram_usage.start_offset, - (adev->fw_vram_usage.start_offset + - adev->fw_vram_usage.size)); - if (r) - goto error_pin; - r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); - if (r) - goto error_kmap; + amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); + ctx->p2c_bo = NULL; + + return 0; +} + +/** + * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training + * + * @adev: amdgpu_device pointer + * + * create bo vram reservation from memory training.
+ */ +static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) +{ + int ret; + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; + + memset(ctx, 0, sizeof(*ctx)); + if (!adev->fw_vram_usage.mem_train_support) { + DRM_DEBUG("memory training is not supported!\n"); + return 0; + } + + ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; + ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); + ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); + ret = amdgpu_bo_create_kernel_at(adev, + ctx->p2c_train_data_offset, + ctx->train_data_size, + AMDGPU_GEM_DOMAIN_VRAM, + &ctx->p2c_bo, + NULL); + if (ret) { + DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); + goto Err_out; } - return r; -error_kmap: - amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); -error_pin: - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); -error_reserve: - amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); -error_create: - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; - return r; + ret = amdgpu_bo_create_kernel_at(adev, + ctx->c2p_train_data_offset, + ctx->train_data_size, + AMDGPU_GEM_DOMAIN_VRAM, + &ctx->c2p_bo, + NULL); + if (ret) { + DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + goto Err_out; + } + + ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; + return 0; + +Err_out: + amdgpu_ttm_training_reserve_vram_fini(adev); + return ret; } + /** * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. @@ -1794,6 +1840,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* + * The reserved vram for memory training must be pinned to the specified + * place on the VRAM, so reserve it early. + */ + r = amdgpu_ttm_training_reserve_vram_init(adev); + if (r) + return r; + /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS @@ -1804,6 +1858,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) NULL, &stolen_vga_buf); if (r) return r; + + /* + * reserve one TMR (64K) memory at the top of VRAM which holds + * IP Discovery data and is protected by PSP.
+ */ + r = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, + DISCOVERY_TMR_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->discovery_memory, + NULL); + if (r) + return r; + DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); @@ -1879,7 +1947,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) return; amdgpu_ttm_debugfs_fini(adev); + amdgpu_ttm_training_reserve_vram_fini(adev); + /* return the IP Discovery TMR memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); + if (adev->mman.aper_base_kaddr) iounmap(adev->mman.aper_base_kaddr); adev->mman.aper_base_kaddr = NULL; @@ -1975,10 +2047,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE; - num_dw = adev->mman.buffer_funcs->copy_num_dw; - while (num_dw & 0x7) - num_dw++; - + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = num_pages * 8; r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); @@ -2038,11 +2107,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, max_bytes = adev->mman.buffer_funcs->copy_max_bytes; num_loops = DIV_ROUND_UP(byte_count, max_bytes); - num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; - - /* for IB padding */ - while (num_dw & 0x7) - num_dw++; + num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 3a6115ad0196..9ef312428231 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_RAVEN: case CHIP_VEGA12: case CHIP_VEGA20: + case CHIP_ARCTURUS: case CHIP_RENOIR: case CHIP_NAVI10: case CHIP_NAVI14: @@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; - case CHIP_ARCTURUS: - return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); @@ -448,6 +447,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct common_firmware_header *header = NULL; const struct gfx_firmware_header_v1_0 *cp_hdr = NULL; const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; + const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL; if (NULL == ucode->fw) return 0; @@ -461,6 +461,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, header = (const struct common_firmware_header *)ucode->fw->data; cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data; + dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && @@ -471,7 +472,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) { + ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV && + ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) { ucode->ucode_size = 
le32_to_cpu(header->ucode_size_bytes); memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + @@ -507,6 +509,12 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, le32_to_cpu(header->ucode_array_offset_bytes) + le32_to_cpu(dmcu_hdr->intv_offset_bytes)), ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) { + ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); + memcpy(ucode->kaddr, + (void *)((uint8_t *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes)), + ucode->ucode_size); } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index b34f00d42049..eaf2d5b9c92f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 { uint32_t ta_ras_ucode_version; uint32_t ta_ras_offset_bytes; uint32_t ta_ras_size_bytes; + uint32_t ta_hdcp_ucode_version; + uint32_t ta_hdcp_offset_bytes; + uint32_t ta_hdcp_size_bytes; + uint32_t ta_dtm_ucode_version; + uint32_t ta_dtm_offset_bytes; + uint32_t ta_dtm_size_bytes; }; /* version_major=1, version_minor=0 */ @@ -245,6 +251,13 @@ struct dmcu_firmware_header_v1_0 { uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */ }; +/* version_major=1, version_minor=0 */ +struct dmcub_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t inst_const_bytes; /* size of instruction region, in bytes */ + uint32_t bss_data_bytes; /* size of bss/data region, in bytes */ +}; + /* header is fixed size */ union amdgpu_firmware_header { struct common_firmware_header common; @@ -262,6 +275,7 @@ union amdgpu_firmware_header { struct sdma_firmware_header_v1_1 sdma_v1_1; struct gpu_info_firmware_header_v1_0 gpu_info; struct dmcu_firmware_header_v1_0 dmcu; + struct dmcub_firmware_header_v1_0 dmcub; uint8_t raw[0x100]; }; @@ -301,6 +315,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_DMCU_INTV, AMDGPU_UCODE_ID_VCN0_RAM, AMDGPU_UCODE_ID_VCN1_RAM, + AMDGPU_UCODE_ID_DMCUB, AMDGPU_UCODE_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c new file mode 100644 index 000000000000..d4fb9cf27e21 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -0,0 +1,158 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu_ras.h" + +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "umc_err_count", + .debugfs_name = "umc_err_inject", + }; + struct ras_ih_if ih_info = { + .cb = amdgpu_umc_process_ras_data_cb, + }; + + if (!adev->umc.ras_if) { + adev->umc.ras_if = + kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->umc.ras_if) + return -ENOMEM; + adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC; + adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->umc.ras_if->sub_block_index = 0; + strcpy(adev->umc.ras_if->name, "umc"); + } + ih_info.head = fs_info.head = *adev->umc.ras_if; + + r = amdgpu_ras_late_init(adev, adev->umc.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + /* ras init of specific umc version */ + if (adev->umc.funcs && adev->umc.funcs->err_cnt_init) + adev->umc.funcs->err_cnt_init(adev); + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info); +free: + kfree(adev->umc.ras_if); + adev->umc.ras_if = NULL; + return r; +} + +void amdgpu_umc_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && + adev->umc.ras_if) { + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_umc_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + /* When "Full RAS" is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return AMDGPU_RAS_SUCCESS; + + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->umc.funcs && + adev->umc.funcs->query_ras_error_count) + adev->umc.funcs->query_ras_error_count(adev, ras_error_status); + + if (adev->umc.funcs && + adev->umc.funcs->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + /* still call query_ras_error_address to clear error status + * even if a NOMEM error is encountered + */ + if (!err_data->err_addr) + DRM_WARN("Failed to alloc memory for umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.funcs->query_ras_error_address(adev, ras_error_status); + } + + /* only uncorrectable error needs gpu reset */ + if (err_data->ue_count) { + if (err_data->err_addr_cnt && + amdgpu_ras_add_bad_pages(adev, err_data->err_addr, + err_data->err_addr_cnt)) + DRM_WARN("Failed to add ras bad page!\n"); + + amdgpu_ras_reset_gpu(adev, 0); + } + + kfree(err_data->err_addr); + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, +
struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 975afa04df09..3283032a78e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -54,7 +54,8 @@ adev->umc.funcs->disable_umc_index_mode(adev); struct amdgpu_umc_funcs { - void (*ras_init)(struct amdgpu_device *adev); + void (*err_cnt_init)(struct amdgpu_device *adev); + int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, @@ -62,6 +63,7 @@ struct amdgpu_umc_funcs { void (*enable_umc_index_mode)(struct amdgpu_device *adev, uint32_t umc_instance); void (*disable_umc_index_mode)(struct amdgpu_device *adev); + void (*init_registers)(struct amdgpu_device *adev); }; struct amdgpu_umc { @@ -75,8 +77,17 @@ struct amdgpu_umc { uint32_t channel_offs; /* channel index table of interleaved memory */ const uint32_t *channel_idx_tbl; + struct ras_common_if *ras_if; const struct amdgpu_umc_funcs *funcs; }; +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev); +void amdgpu_umc_ras_fini(struct amdgpu_device *adev); +int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry); +int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b2c364b8695f..d587ffe2af8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -39,6 +39,8 @@ #include "cikd.h" #include "uvd/uvd_4_2_d.h" +#include "amdgpu_ras.h" + /* 1 second timeout */ #define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) @@ -297,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int i, j; + cancel_delayed_work_sync(&adev->uvd.idle_work); drm_sched_entity_destroy(&adev->uvd.entity); for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { @@ -346,6 +349,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) unsigned size; void *ptr; int i, j; + bool in_ras_intr = amdgpu_ras_intr_triggered(); cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -372,8 +376,16 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.inst[j].saved_bo) return -ENOMEM; - memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + /* re-write 0 since err_event_athub will corrupt VCPU buffer */ + if (in_ras_intr) + memset(adev->uvd.inst[j].saved_bo, 0, size); + else + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); } + + if (in_ras_intr) + DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b70b3c45bb29..46b590af2fd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12); MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); +static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo
*bo, + struct dma_fence **fence); +static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + bool direct, struct dma_fence **fence); /** * amdgpu_vce_init - allocate memory, load vce firmware @@ -211,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; + cancel_delayed_work_sync(&adev->vce.idle_work); drm_sched_entity_destroy(&adev->vce.entity); amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, @@ -428,14 +434,15 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * * Open up a stream for HW test */ -int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) +static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -444,7 +451,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); /* stitch together an VCE create msg */ ib->length_dw = 0; @@ -476,8 +483,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000014; /* len */ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000001; for (i = ib->length_dw; i < ib_size_dw; ++i) @@ -507,8 +514,8 @@ err: * * Close up a stream for HW test or if userspace failed to do so */ -int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) +static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + bool direct, struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; @@ -1110,13 +1117,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; /* skip vce ring1/2 ib test for now, since it's not reliable */ if (ring != &ring->adev->vce.ring[0]) return 0; - r = amdgpu_vce_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -1132,5 +1146,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 30ea54dd9117..d6d83a3ec803 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -58,10 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_entity_init(struct amdgpu_device *adev); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); -int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence); -int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, 
uint32_t handle, - bool direct, struct dma_fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 7a6beb2e7c4e..428cfd58b37d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) { int i, j; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if (adev->vcn.indirect_sram) { amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo, &adev->vcn.dpg_sram_gpu_addr, @@ -212,8 +214,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_enc_rings; ++i) amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); - - amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg); } release_firmware(adev->vcn.fw); @@ -306,7 +306,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -314,7 +314,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) adev->vcn.pause_dpg_mode(adev, &new_state); } - fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg); + fence[j] += amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec); fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); fences += fence[j]; } @@ -358,7 +358,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->jpeg.inst[ring->me].ring_dec)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x0000000b; ib->ptr[ib->length_dw++] = 0x00000014; @@ -621,13 +622,14 @@ err: } static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 
&job); @@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x0000000b; ib->ptr[ib->length_dw++] = 0x00000014; @@ -675,13 +677,20 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; - r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence); + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence); if (r) goto error; @@ -693,110 +702,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - return r; -} - -int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t tmp = 0; - unsigned i; - int r; - - WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); - r = amdgpu_ring_alloc(ring, 3); - if (r) - return r; - - amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0)); - amdgpu_ring_write(ring, 0xDEADBEEF); - amdgpu_ring_commit(ring); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; - - return r; -} - -static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_job *job; - struct amdgpu_ib *ib; - struct dma_fence *f = NULL; - const unsigned ib_size_dw = 16; - int i, r; - - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); - if (r) - return r; - - ib = &job->ibs[0]; - - ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); - ib->ptr[1] = 0xDEADBEEF; - for (i = 2; i < 16; i += 2) { - ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); - ib->ptr[i+1] = 0; - } - ib->length_dw = 16; - - r = amdgpu_job_submit_direct(job, ring, &f); - if (r) - goto err; - - if (fence) - *fence = dma_fence_get(f); - dma_fence_put(f); - - return 0; - -err: - amdgpu_job_free(job); - return r; -} - -int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t tmp = 0; - unsigned i; - struct dma_fence *fence = NULL; - long r = 0; - - r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence); - if (r) - goto error; - - r = dma_fence_wait_timeout(fence, false, timeout); - if (r == 0) { - r = -ETIMEDOUT; - goto error; - } else if (r < 0) { - goto error; - } else { - r = 0; - } - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); - if (tmp == 0xDEADBEEF) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) - r = -ETIMEDOUT; - - dma_fence_put(fence); -error: + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dface275c81a..402a5046b985 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -158,7 +158,6 @@ struct amdgpu_vcn_reg{ unsigned ib_size; unsigned gp_scratch8; unsigned scratch9; - unsigned jpeg_pitch; }; struct amdgpu_vcn_inst { @@ -168,7 +167,6 @@ struct amdgpu_vcn_inst { void *saved_bo; struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; - struct amdgpu_ring ring_jpeg; struct amdgpu_irq_src irq; struct amdgpu_vcn_reg external; }; @@ -209,7 +207,4 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout); -int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring); -int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e32ae906d797..103033f96f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -379,54 +379,3 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) } } } - -static uint32_t parse_clk(char *buf, bool min) -{ - char *ptr = buf; - uint32_t clk = 0; - - do { - ptr = strchr(ptr, ':'); - if (!ptr) - break; - ptr+=2; - if (kstrtou32(ptr, 10, &clk)) - return 0; - } while (!min); - - return clk * 100; -} - -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) -{ - char *buf = NULL; - uint32_t clk = 0; - - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); - clk = parse_clk(buf, lowest); - - kfree(buf); - - return clk; -} - -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) -{ - char *buf = NULL; - uint32_t clk = 0; - - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); - clk = parse_clk(buf, lowest); - - kfree(buf); - - return clk; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b0b2bdc750df..4d1ac7612967 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -57,8 +57,6 @@ struct amdgpu_virt_ops { int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); - int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); - int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); }; /* @@ -85,8 +83,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, /* VRAM LOST by GIM */ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, - /* HW PERF SIM in GIM */ - AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), + /* PP ONE VF MODE in GIM */ + AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), }; struct amd_sriov_msg_pf2vf_info_header { @@ -257,8 +255,6 @@ struct amdgpu_virt { struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve fw_reserve; uint32_t gim_feature; - /* protect DPM events to GIM */ - struct mutex dpm_mutex; uint32_t reg_access_mode; }; @@ -286,8 +282,8 @@ static inline bool is_virtual_machine(void) #endif } -#define amdgim_is_hwperf(adev) \ - ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) +#define amdgpu_sriov_is_pp_one_vf(adev) \ + 
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); @@ -306,6 +302,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, unsigned int key, unsigned int chksum); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d8cfcf2d7455..8f26504a59a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, if (level == adev->vm_manager.root_level) /* For the root directory */ - return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift; + return round_up(adev->vm_manager.max_pfn, 1ULL << shift) + >> shift; else if (level != AMDGPU_VM_PTB) /* Everything in between */ return 512; @@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); } -/** +/* * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt */ struct amdgpu_vm_pt_cursor { @@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev, * * @adev: amdgpu_device structure * @vm: amdgpu_vm structure + * @start: optional cursor to start with * @cursor: state to initialize * * Starts a deep first traversal of the PD/PT tree. @@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev, amdgpu_vm_pt_ancestor(cursor); } -/** +/* * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs */ #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ @@ -560,12 +562,20 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, { entry->priority = 0; entry->tv.bo = &vm->root.base.bo->tbo; - /* One for the VM updates, one for TTM and one for the CS job */ - entry->tv.num_shared = 3; + /* One for TTM and one for the CS job */ + entry->tv.num_shared = 2; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } +/** + * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag + * + * @bo: BO which was removed from the LRU + * + * Make sure the bulk_moveable flag is updated when a BO is removed from the + * LRU. + */ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) { struct amdgpu_bo *abo; @@ -646,7 +656,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { struct amdgpu_vm_bo_base *bo_base, *tmp; - int r = 0; + int r; vm->bulk_moveable &= list_empty(&vm->evicted); @@ -655,7 +665,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = validate(param, bo); if (r) - break; + return r; if (bo->tbo.type != ttm_bo_type_kernel) { amdgpu_vm_bo_moved(bo_base); @@ -668,7 +678,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } } - return r; + mutex_lock(&vm->eviction_lock); + vm->evicting = false; + mutex_unlock(&vm->eviction_lock); + + return 0; } /** @@ -692,6 +706,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) * @adev: amdgpu_device pointer * @vm: VM to clear BO from * @bo: BO to clear + * @direct: use a direct update * * Root PD needs to be reserved when calling this. 
* @@ -700,7 +715,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo) + struct amdgpu_bo *bo, + bool direct) { struct ttm_operation_ctx ctx = { true, false }; unsigned level = adev->vm_manager.root_level; @@ -759,6 +775,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_KFD, NULL); if (r) @@ -812,10 +829,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requesting vm + * @level: the page table level + * @direct: use a direct update * @bp: resulting BO allocation parameters */ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int level, struct amdgpu_bo_param *bp) + int level, bool direct, + struct amdgpu_bo_param *bp) { memset(bp, 0, sizeof(*bp)); @@ -830,6 +850,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, else if (!vm->root.base.bo || vm->root.base.bo->shadow) bp->flags |= AMDGPU_GEM_CREATE_SHADOW; bp->type = ttm_bo_type_kernel; + bp->no_wait_gpu = direct; if (vm->root.base.bo) bp->resv = vm->root.base.bo->tbo.base.resv; } @@ -840,6 +861,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, * @adev: amdgpu_device pointer * @vm: VM to allocate page tables for * @cursor: Which page table to allocate + * @direct: use a direct update * * Make sure a specific page table or directory is allocated. * @@ -849,7 +871,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_vm_pt_cursor *cursor) + struct amdgpu_vm_pt_cursor *cursor, + bool direct) { struct amdgpu_vm_pt *entry = cursor->entry; struct amdgpu_bo_param bp; @@ -870,7 +893,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, if (entry->base.bo) return 0; - amdgpu_vm_bo_param(adev, vm, cursor->level, &bp); + amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); r = amdgpu_bo_create(adev, &bp, &pt); if (r) @@ -882,7 +905,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - r = amdgpu_vm_clear_bo(adev, vm, pt); + r = amdgpu_vm_clear_bo(adev, vm, pt, direct); if (r) goto error_free_pt; @@ -1019,7 +1042,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, * Returns: * 0 on success, errno otherwise. 
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, + bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -1033,10 +1057,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ id->oa_base != job->oa_base || id->oa_size != job->oa_size); bool vm_flush_needed = job->vm_needs_flush; - bool pasid_mapping_needed = id->pasid != job->pasid || - !id->pasid_mapping || - !dma_fence_is_signaled(id->pasid_mapping); struct dma_fence *fence = NULL; + bool pasid_mapping_needed = false; unsigned patch_offset = 0; int r; @@ -1046,6 +1068,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ pasid_mapping_needed = true; } + mutex_lock(&id_mgr->lock); + if (id->pasid != job->pasid || !id->pasid_mapping || + !dma_fence_is_signaled(id->pasid_mapping)) + pasid_mapping_needed = true; + mutex_unlock(&id_mgr->lock); + gds_switch_needed &= !!ring->funcs->emit_gds_switch; vm_flush_needed &= !!ring->funcs->emit_vm_flush && job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; @@ -1085,9 +1113,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } if (pasid_mapping_needed) { + mutex_lock(&id_mgr->lock); id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); id->pasid_mapping = dma_fence_get(fence); + mutex_unlock(&id_mgr->lock); } dma_fence_put(fence); @@ -1171,10 +1201,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) return result; } -/* +/** * amdgpu_vm_update_pde - update a single level in the hierarchy * - * @param: parameters for the update + * @params: parameters for the update * @vm: requested vm * @entry: entry to update * @@ -1198,7 +1228,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); } -/* +/** * amdgpu_vm_invalidate_pds - mark all PDs as invalid * * @adev: amdgpu_device pointer @@ -1217,19 +1247,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, amdgpu_vm_bo_relocated(&entry->base); } -/* - * amdgpu_vm_update_directories - make sure that all directories are valid +/** + * amdgpu_vm_update_pdes - make sure that all directories are valid * * @adev: amdgpu_device pointer * @vm: requested vm + * @direct: submit directly to the paging queue * * Makes sure all directories are up to date. * * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_update_directories(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +int amdgpu_vm_update_pdes(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct) { struct amdgpu_vm_update_params params; int r; @@ -1240,6 +1271,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_VM, NULL); if (r) @@ -1267,7 +1299,7 @@ error: return r; } -/** +/* * amdgpu_vm_update_flags - figure out flags for PTE updates * * Make sure to set the right flags for the PTEs at the desired level. 
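The amdgpu_vm_flush() hunk above replaces the unlocked pasid_mapping check with a check-and-commit pair done under the VMID manager lock, so a concurrent flush cannot observe a half-updated id->pasid/id->pasid_mapping pair. A minimal sketch of that idiom, using the amdgpu-internal types from the hunk (the example_* helper names are illustrative, not part of the patch):

static bool example_pasid_flush_needed(struct amdgpu_vmid_mgr *id_mgr,
				       struct amdgpu_vmid *id,
				       struct amdgpu_job *job)
{
	bool needed;

	/* Inspect the cached mapping only while holding the manager lock */
	mutex_lock(&id_mgr->lock);
	needed = id->pasid != job->pasid || !id->pasid_mapping ||
		 !dma_fence_is_signaled(id->pasid_mapping);
	mutex_unlock(&id_mgr->lock);

	return needed;
}

static void example_pasid_commit(struct amdgpu_vmid_mgr *id_mgr,
				 struct amdgpu_vmid *id,
				 struct amdgpu_job *job,
				 struct dma_fence *fence)
{
	/* Publish the new mapping under the same lock */
	mutex_lock(&id_mgr->lock);
	id->pasid = job->pasid;
	dma_fence_put(id->pasid_mapping);	/* drop the old mapping fence */
	id->pasid_mapping = dma_fence_get(fence);
	mutex_unlock(&id_mgr->lock);
}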
@@ -1390,7 +1422,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t incr, entry_end, pe_start; struct amdgpu_bo *pt; - r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor); + /* make sure that the page tables covering the address range are + * actually allocated + */ + r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, + params->direct); if (r) return r; @@ -1462,7 +1498,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, } while (frag_start < entry_end); if (amdgpu_vm_pt_descendant(adev, &cursor)) { - /* Free all child entries */ + /* Free all child entries. + * Update the tables with the flags and addresses and free up subsequent + * tables in the case of huge pages or freed up areas. + * This is the maximum you can free, because all other page tables are not + * completely covered by the range and so potentially still in use. + */ while (cursor.pfn < frag_start) { amdgpu_vm_free_pts(adev, params->vm, &cursor); amdgpu_vm_pt_next(adev, &cursor); @@ -1481,13 +1522,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer - * @exclusive: fence we need to sync to - * @pages_addr: DMA addresses to use for mapping * @vm: requested vm + * @direct: direct submission in a page fault + * @exclusive: fence we need to sync to * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries * @addr: addr to set the area to + * @pages_addr: DMA addresses to use for mapping * @fence: optional resulting fence * * Fill in the page table entries between @start and @last. @@ -1496,11 +1538,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * 0 for success, -EINVAL for failure. 
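 * -EBUSY while the VM is being evicted: the whole update runs with the
 * VM's eviction_lock held and bails out early when vm->evicting is set.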
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct, struct dma_fence *exclusive, - dma_addr_t *pages_addr, - struct amdgpu_vm *vm, uint64_t start, uint64_t last, uint64_t flags, uint64_t addr, + dma_addr_t *pages_addr, struct dma_fence **fence) { struct amdgpu_vm_update_params params; @@ -1510,21 +1552,32 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; params.pages_addr = pages_addr; /* sync to everything except eviction fences on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) owner = AMDGPU_FENCE_OWNER_KFD; + mutex_lock(&vm->eviction_lock); + if (vm->evicting) { + r = -EBUSY; + goto error_unlock; + } + r = vm->update_funcs->prepare(¶ms, owner, exclusive); if (r) - return r; + goto error_unlock; r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags); if (r) - return r; + goto error_unlock; - return vm->update_funcs->commit(¶ms, fence); + r = vm->update_funcs->commit(¶ms, fence); + +error_unlock: + mutex_unlock(&vm->eviction_lock); + return r; } /** @@ -1568,27 +1621,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) flags &= ~AMDGPU_PTE_WRITEABLE; - flags &= ~AMDGPU_PTE_EXECUTABLE; - flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; - - if (adev->asic_type >= CHIP_NAVI10) { - flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); - } else { - flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK); - } - - if ((mapping->flags & AMDGPU_PTE_PRT) && - (adev->asic_type >= CHIP_VEGA10)) { - flags |= AMDGPU_PTE_PRT; - if (adev->asic_type >= CHIP_NAVI10) { - flags |= AMDGPU_PTE_SNOOPED; - flags |= AMDGPU_PTE_LOG; - flags |= AMDGPU_PTE_SYSTEM; - } - flags &= ~AMDGPU_PTE_VALID; - } + /* Apply ASIC specific mapping flags */ + amdgpu_gmc_get_vm_pte(adev, mapping, &flags); trace_amdgpu_vm_bo_update(mapping); @@ -1632,7 +1666,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, dma_addr = pages_addr; } else { addr = pages_addr[pfn]; - max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE; + max_entries = count * + AMDGPU_GPU_PAGES_IN_CPU_PAGE; } } else if (flags & AMDGPU_PTE_VALID) { @@ -1641,9 +1676,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } last = min((uint64_t)mapping->last, start + max_entries - 1); - r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, start, last, flags, addr, - fence); + dma_addr, fence); if (r) return r; @@ -1671,8 +1706,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * Returns: * 0 for success, -EINVAL for failure. 
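 * -EBUSY can also propagate up from amdgpu_vm_bo_update_mapping() while
 * the VM's page tables are being evicted.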
*/ -int amdgpu_vm_bo_update(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, +int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; @@ -1699,7 +1733,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = dma_resv_get_excl(bo->tbo.base.resv); + exclusive = bo->tbo.moving; } if (bo) { @@ -1730,12 +1764,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, return r; } - if (vm->use_cpu_for_update) { - /* Flush HDP */ - mb(); - amdgpu_asic_flush_hdp(adev, NULL); - } - /* If the BO is not in its preferred location add it back to * the evicted list so that it gets validated again on the * next command submission. @@ -1743,7 +1771,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { uint32_t mem_type = bo->tbo.mem.mem_type; - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + if (!(bo->preferred_domains & + amdgpu_mem_type_to_domain(mem_type))) amdgpu_vm_bo_evicted(&bo_va->base); else amdgpu_vm_bo_idle(&bo_va->base); @@ -1937,9 +1966,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, mapping->start, mapping->last, - init_pte_value, 0, &f); + init_pte_value, 0, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2485,6 +2514,41 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, } /** + * amdgpu_vm_evictable - check if we can evict a VM + * + * @bo: A page table of the VM. + * + * Check if it is possible to evict a VM. 
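+ *
+ * Returns:
+ * True if eviction may proceed: the VM is already destroyed, or its page
+ * tables are idle, no direct or delayed update is still in flight, and the
+ * eviction_lock could be taken to mark the VM as evicting.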
+ */ +bool amdgpu_vm_evictable(struct amdgpu_bo *bo) +{ + struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; + + /* Page tables of a destroyed VM can go away immediately */ + if (!bo_base || !bo_base->vm) + return true; + + /* Don't evict VM page tables while they are busy */ + if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) + return false; + + /* Try to block ongoing updates */ + if (!mutex_trylock(&bo_base->vm->eviction_lock)) + return false; + + /* Don't evict VM page tables while they are updated */ + if (!dma_fence_is_signaled(bo_base->vm->last_direct) || + !dma_fence_is_signaled(bo_base->vm->last_delayed)) { + mutex_unlock(&bo_base->vm->eviction_lock); + return false; + } + + bo_base->vm->evicting = true; + mutex_unlock(&bo_base->vm->eviction_lock); + return true; +} + +/** * amdgpu_vm_bo_invalidate - mark the bo as invalid * * @adev: amdgpu_device pointer @@ -2646,8 +2710,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, - true, true, timeout); + timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, + true, true, timeout); + if (timeout <= 0) + return timeout; + + timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout); + if (timeout <= 0) + return timeout; + + return dma_fence_wait_timeout(vm->last_delayed, true, timeout); } /** @@ -2681,13 +2753,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->freed); - /* create scheduler entity for page table updates */ - r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs, + /* create scheduler entities for page table updates */ + r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs, adev->vm_manager.vm_pte_num_rqs, NULL); if (r) return r; + r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs, + adev->vm_manager.vm_pte_num_rqs, NULL); + if (r) + goto error_free_direct; + vm->pte_support_ats = false; + vm->is_compute_context = false; if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & @@ -2701,7 +2779,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, } DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? 
"CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && + !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->use_cpu_for_update) @@ -2709,13 +2788,18 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, else vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; + vm->last_direct = dma_fence_get_stub(); + vm->last_delayed = dma_fence_get_stub(); - amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp); + mutex_init(&vm->eviction_lock); + vm->evicting = false; + + amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; r = amdgpu_bo_create(adev, &bp, &root); if (r) - goto error_free_sched_entity; + goto error_free_delayed; r = amdgpu_bo_reserve(root, true); if (r) @@ -2727,7 +2811,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, amdgpu_vm_bo_base_init(&vm->root.base, vm, root); - r = amdgpu_vm_clear_bo(adev, vm, root); + r = amdgpu_vm_clear_bo(adev, vm, root, false); if (r) goto error_unreserve; @@ -2758,8 +2842,13 @@ error_free_root: amdgpu_bo_unref(&vm->root.base.bo); vm->root.base.bo = NULL; -error_free_sched_entity: - drm_sched_entity_destroy(&vm->entity); +error_free_delayed: + dma_fence_put(vm->last_direct); + dma_fence_put(vm->last_delayed); + drm_sched_entity_destroy(&vm->delayed); + +error_free_direct: + drm_sched_entity_destroy(&vm->direct); return r; } @@ -2800,6 +2889,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requested vm + * @pasid: pasid to use * * This only works on GFX VMs that don't have any BOs added and no * page tables allocated yet. @@ -2815,7 +2905,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * Returns: * 0 for success, -errno for errors. */ -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, + unsigned int pasid) { bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; @@ -2847,7 +2938,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns */ if (pte_support_ats != vm->pte_support_ats) { vm->pte_support_ats = pte_support_ats; - r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo); + r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false); if (r) goto free_idr; } @@ -2857,7 +2948,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns AMDGPU_VM_USE_CPU_FOR_COMPUTE); DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? 
"CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && + !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->use_cpu_for_update) @@ -2866,6 +2958,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns vm->update_funcs = &amdgpu_vm_sdma_funcs; dma_fence_put(vm->last_update); vm->last_update = NULL; + vm->is_compute_context = true; if (vm->pasid) { unsigned long flags; @@ -2920,6 +3013,7 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } vm->pasid = 0; + vm->is_compute_context = false; } /** @@ -2936,31 +3030,26 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; struct amdgpu_bo *root; - int i, r; + int i; amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); + root = amdgpu_bo_ref(vm->root.base.bo); + amdgpu_bo_reserve(root, true); if (vm->pasid) { unsigned long flags; spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + vm->pasid = 0; } - drm_sched_entity_destroy(&vm->entity); + dma_fence_wait(vm->last_direct, false); + dma_fence_put(vm->last_direct); + dma_fence_wait(vm->last_delayed, false); + dma_fence_put(vm->last_delayed); - if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { - dev_err(adev->dev, "still active bo inside vm\n"); - } - rbtree_postorder_for_each_entry_safe(mapping, tmp, - &vm->va.rb_root, rb) { - /* Don't remove the mapping here, we don't want to trigger a - * rebalance and the tree is about to be destroyed anyway. - */ - list_del(&mapping->list); - kfree(mapping); - } list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { amdgpu_vm_prt_fini(adev, vm); @@ -2971,16 +3060,26 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - root = amdgpu_bo_ref(vm->root.base.bo); - r = amdgpu_bo_reserve(root, true); - if (r) { - dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); - } else { - amdgpu_vm_free_pts(adev, vm, NULL); - amdgpu_bo_unreserve(root); - } + amdgpu_vm_free_pts(adev, vm, NULL); + amdgpu_bo_unreserve(root); amdgpu_bo_unref(&root); WARN_ON(vm->root.base.bo); + + drm_sched_entity_destroy(&vm->direct); + drm_sched_entity_destroy(&vm->delayed); + + if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { + dev_err(adev->dev, "still active bo inside vm\n"); + } + rbtree_postorder_for_each_entry_safe(mapping, tmp, + &vm->va.rb_root, rb) { + /* Don't remove the mapping here, we don't want to trigger a + * rebalance and the tree is about to be destroyed anyway. 
+ */ + list_del(&mapping->list); + kfree(mapping); + } + dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vmid_free_reserved(adev, vm, i); @@ -3064,8 +3163,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: - /* current, we only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); + /* We only have requirement to reserve vmid from gfxhub */ + r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, + AMDGPU_GFXHUB_0); if (r) return r; break; @@ -3108,13 +3208,97 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, */ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) { - if (!vm->task_info.pid) { - vm->task_info.pid = current->pid; - get_task_comm(vm->task_info.task_name, current); + if (vm->task_info.pid) + return; - if (current->group_leader->mm == current->mm) { - vm->task_info.tgid = current->group_leader->pid; - get_task_comm(vm->task_info.process_name, current->group_leader); - } + vm->task_info.pid = current->pid; + get_task_comm(vm->task_info.task_name, current); + + if (current->group_leader->mm != current->mm) + return; + + vm->task_info.tgid = current->group_leader->pid; + get_task_comm(vm->task_info.process_name, current->group_leader); +} + +/** + * amdgpu_vm_handle_fault - graceful handling of VM faults. + * @adev: amdgpu device pointer + * @pasid: PASID of the VM + * @addr: Address of the fault + * + * Try to gracefully handle a VM fault. Return true if the fault was handled and + * shouldn't be reported any more. + */ +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, + uint64_t addr) +{ + struct amdgpu_bo *root; + uint64_t value, flags; + struct amdgpu_vm *vm; + long r; + + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + if (vm) + root = amdgpu_bo_ref(vm->root.base.bo); + else + root = NULL; + spin_unlock(&adev->vm_manager.pasid_lock); + + if (!root) + return false; + + r = amdgpu_bo_reserve(root, true); + if (r) + goto error_unref; + + /* Double check that the VM still exists */ + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + if (vm && vm->root.base.bo != root) + vm = NULL; + spin_unlock(&adev->vm_manager.pasid_lock); + if (!vm) + goto error_unlock; + + addr /= AMDGPU_GPU_PAGE_SIZE; + flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | + AMDGPU_PTE_SYSTEM; + + if (vm->is_compute_context) { + /* Intentionally setting invalid PTE flag + * combination to force a no-retry-fault + */ + flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | + AMDGPU_PTE_TF; + value = 0; + + } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { + /* Redirect the access to the dummy page */ + value = adev->dummy_page_addr; + flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | + AMDGPU_PTE_WRITEABLE; + + } else { + /* Let the hw retry silently on the PTE */ + value = 0; } + + r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, + flags, value, NULL, NULL); + if (r) + goto error_unlock; + + r = amdgpu_vm_update_pdes(adev, vm, true); + +error_unlock: + amdgpu_bo_unreserve(root); + if (r < 0) + DRM_ERROR("Can't handle page fault (%ld)\n", r); + +error_unref: + amdgpu_bo_unref(&root); + + return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2eda3a8c330d..7e0eb36da27d 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 +/* Reserve 4MB VRAM for page tables */ +#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20) + /* max number of VMHUB */ #define AMDGPU_MAX_VMHUBS 3 #define AMDGPU_GFXHUB_0 0 @@ -199,6 +202,11 @@ struct amdgpu_vm_update_params { struct amdgpu_vm *vm; /** + * @direct: if changes should be made directly + */ + bool direct; + + /** * @pages_addr: * * DMA addresses to use for mapping @@ -231,6 +239,10 @@ struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; + /* Lock to prevent eviction while we are updating page tables */ + struct mutex eviction_lock; + bool evicting; + /* BOs who needs a validation */ struct list_head evicted; @@ -254,8 +266,13 @@ struct amdgpu_vm { struct amdgpu_vm_pt root; struct dma_fence *last_update; - /* Scheduler entity for page table updates */ - struct drm_sched_entity entity; + /* Scheduler entities for page table updates */ + struct drm_sched_entity direct; + struct drm_sched_entity delayed; + + /* Last submission to the scheduler entities */ + struct dma_fence *last_direct; + struct dma_fence *last_delayed; unsigned int pasid; /* dedicated to vm */ @@ -289,6 +306,8 @@ struct amdgpu_vm { struct ttm_lru_bulk_move lru_bulk_move; /* mark whether can do the bulk move */ bool bulk_moveable; + /* Flag to indicate if VM is used for compute */ + bool is_compute_context; }; struct amdgpu_vm_manager { @@ -357,8 +376,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); -int amdgpu_vm_update_directories(struct amdgpu_device *adev, - struct amdgpu_vm *vm); +int amdgpu_vm_update_pdes(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); @@ -367,6 +386,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); +bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); @@ -404,6 +424,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info); +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, + uint64_t addr); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 5222d165abfc..73fec7a0ced5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, { int r; - /* Wait for PT BOs to be idle. PTs share the same resv. 
object - * as the root PD BO - */ - r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); - if (unlikely(r)) - return r; - /* Wait for any BO move to be completed */ if (exclusive) { r = dma_fence_wait(exclusive, true); @@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, return r; } - return 0; + /* Don't wait for submissions during page fault */ + if (p->direct) + return 0; + + /* Wait for PT BOs to be idle. PTs share the same resv. object + * as the root PD BO + */ + return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); } /** @@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, pe += (unsigned long)amdgpu_bo_kptr(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); for (i = 0; i < count; i++) { value = p->pages_addr ? diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 61fc584cbb1a..19b7f80758f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, if (r) return r; - r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false); - if (r) - return r; + p->num_dw_left = ndw; - r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, - owner, false); + /* Wait for moves to be completed */ + r = amdgpu_sync_fence(&p->job->sync, exclusive, false); if (r) return r; - p->num_dw_left = ndw; - return 0; + /* Don't wait for any submissions during page fault handling */ + if (p->direct) + return 0; + + return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, + owner, false); } /** @@ -93,24 +95,30 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { - struct amdgpu_bo *root = p->vm->root.base.bo; struct amdgpu_ib *ib = p->job->ibs; + struct drm_sched_entity *entity; + struct dma_fence *f, *tmp; struct amdgpu_ring *ring; - struct dma_fence *f; int r; - ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched); + entity = p->direct ? 
&p->vm->direct : &p->vm->delayed; + ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > p->num_dw_left); - r = amdgpu_job_submit(p->job, &p->vm->entity, - AMDGPU_FENCE_OWNER_VM, &f); + r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error; - amdgpu_bo_fence(root, f, true); - if (fence) + tmp = dma_fence_get(f); + if (p->direct) + swap(p->vm->last_direct, tmp); + else + swap(p->vm->last_delayed, tmp); + dma_fence_put(tmp); + + if (fence && !p->direct) swap(*fence, f); dma_fence_put(f); return 0; @@ -120,7 +128,6 @@ error: return r; } - /** * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping * @@ -141,7 +148,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, src += p->num_dw_left * 4; pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_copy_ptes(pe, src, count); + trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); } @@ -168,7 +175,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, struct amdgpu_ib *ib = p->job->ibs; pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); if (count < 3) { amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, count, incr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 3a9d8c15fe9f..82a3299e53c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -23,6 +23,9 @@ */ #include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_atomfirmware.h" +#include "atom.h" struct amdgpu_vram_mgr { struct drm_mm mm; @@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM])); } +static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + switch (adev->gmc.vram_vendor) { + case SAMSUNG: + return snprintf(buf, PAGE_SIZE, "samsung\n"); + case INFINEON: + return snprintf(buf, PAGE_SIZE, "infineon\n"); + case ELPIDA: + return snprintf(buf, PAGE_SIZE, "elpida\n"); + case ETRON: + return snprintf(buf, PAGE_SIZE, "etron\n"); + case NANYA: + return snprintf(buf, PAGE_SIZE, "nanya\n"); + case HYNIX: + return snprintf(buf, PAGE_SIZE, "hynix\n"); + case MOSEL: + return snprintf(buf, PAGE_SIZE, "mosel\n"); + case WINBOND: + return snprintf(buf, PAGE_SIZE, "winbond\n"); + case ESMT: + return snprintf(buf, PAGE_SIZE, "esmt\n"); + case MICRON: + return snprintf(buf, PAGE_SIZE, "micron\n"); + default: + return snprintf(buf, PAGE_SIZE, "unknown\n"); + } +} + static DEVICE_ATTR(mem_info_vram_total, S_IRUGO, amdgpu_mem_info_vram_total_show, NULL); static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO, @@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO, amdgpu_mem_info_vram_used_show, NULL); static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO, amdgpu_mem_info_vis_vram_used_show, NULL); +static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO, + amdgpu_mem_info_vram_vendor, NULL); /** * amdgpu_vram_mgr_init - init VRAM manager and DRM MM @@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man, DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n"); return 
ret; } + ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor); + if (ret) { + DRM_ERROR("Failed to create device file mem_info_vram_vendor\n"); + return ret; + } return 0; } @@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total); device_remove_file(adev->dev, &dev_attr_mem_info_vram_used); device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used); + device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor); return 0; } @@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; unsigned long lpfn, num_nodes, pages_per_node, pages_left; - uint64_t vis_usage = 0, mem_bytes; + uint64_t vis_usage = 0, mem_bytes, max_bytes; unsigned i; int r; @@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (!lpfn) lpfn = man->size; + max_bytes = adev->gmc.mc_vram_size; + if (tbo->type != ttm_bo_type_kernel) + max_bytes -= AMDGPU_VM_RESERVED_VRAM; + /* bail out quickly if there's likely not enough VRAM for this BO */ mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; - if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) { + if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { atomic64_sub(mem_bytes, &mgr->usage); mem->mm_node = NULL; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 65aae75f80fd..61d13d8b7b20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" +#include "amdgpu_ras.h" #include "df/df_3_6_offset.h" static DEFINE_MUTEX(xgmi_mutex); @@ -273,22 +274,55 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) { int ret = 0; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); + struct amdgpu_device *tmp_adev; + bool update_hive_pstate = true; + bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20; if (!hive) return 0; - if (hive->pstate == pstate) - return 0; + mutex_lock(&hive->hive_lock); + + if (hive->pstate == pstate) { + adev->pstate = is_high_pstate ? 
pstate : adev->pstate; + goto out; + } dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); if (is_support_sw_smu_xgmi(adev)) ret = smu_set_xgmi_pstate(&adev->smu, pstate); - if (ret) + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_xgmi_pstate) + ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, + pstate); + + if (ret) { dev_err(adev->dev, "XGMI: Set pstate failure on device %llx, hive %llx, ret %d", adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id, ret); + goto out; + } + + /* Update device pstate */ + adev->pstate = pstate; + + /* + * Update the hive pstate only all devices of the hive + * are in the same pstate + */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + if (tmp_adev->pstate != adev->pstate) { + update_hive_pstate = false; + break; + } + } + if (update_hive_pstate || is_high_pstate) + hive->pstate = pstate; + +out: + mutex_unlock(&hive->hive_lock); return ret; } @@ -363,6 +397,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) goto exit; } + /* Set default device pstate */ + adev->pstate = -1; + top_info = &adev->psp.xgmi_context.top_info; list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); @@ -437,3 +474,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) mutex_unlock(&hive->hive_lock); } } + +int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "xgmi_wafl_err_count", + .debugfs_name = "xgmi_wafl_err_inject", + }; + + if (!adev->gmc.xgmi.supported || + adev->gmc.xgmi.num_physical_nodes == 0) + return 0; + + if (!adev->gmc.xgmi.ras_if) { + adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->gmc.xgmi.ras_if) + return -ENOMEM; + adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL; + adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gmc.xgmi.ras_if->sub_block_index = 0; + strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl"); + } + ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if; + r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) { + kfree(adev->gmc.xgmi.ras_if); + adev->gmc.xgmi.ras_if = NULL; + } + + return r; +} + +void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) && + adev->gmc.xgmi.ras_if) { + struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index fbcee31788c4..bbf504ff7051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); +void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c index 4853899b1824..fda99c958c3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c +++ 
b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "arct_ip_offset.h" int arct_reg_base_init(struct amdgpu_device *adev) @@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i])); adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i])); + adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i])); } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 6858cde9fc5d..ea702a64f807 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -361,7 +361,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - struct amdgpu_connector_atom_dig *dig_connector; int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector); u8 tmp; @@ -369,8 +368,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder, if (!amdgpu_connector->con_priv) return panel_mode; - dig_connector = amdgpu_connector->con_priv; - if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, @@ -713,7 +710,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct amdgpu_encoder_atom_dig *dig; struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *dig_connector; struct amdgpu_atombios_dp_link_train_info dp_info; @@ -721,7 +717,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, if (!amdgpu_encoder->enc_priv) return; - dig = amdgpu_encoder->enc_priv; amdgpu_connector = to_amdgpu_connector(connector); if (!amdgpu_connector->con_priv) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index 980c363b1a0a..b4cc7c55fa16 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -76,11 +76,6 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, } args.lpI2CDataOut = cpu_to_le16(out); } else { - if (num > ATOM_MAX_HW_I2C_READ) { - DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); - r = -EINVAL; - goto done; - } args.ucRegIndex = 0; args.lpI2CDataOut = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index b81bb414fcb3..e9822ea8bb19 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS}, + {mmGRBM_STATUS2}, + {mmGRBM_STATUS_SE0}, + {mmGRBM_STATUS_SE1}, + {mmGRBM_STATUS_SE2}, + {mmGRBM_STATUS_SE3}, + {mmSRBM_STATUS}, + {mmSRBM_STATUS2}, + {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET}, + {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET}, + {mmCP_STAT}, + {mmCP_STALLED_STAT1}, + {mmCP_STALLED_STAT2}, + {mmCP_STALLED_STAT3}, + {mmCP_CPF_BUSY_STAT}, 
+ {mmCP_CPF_STALLED_STAT1}, + {mmCP_CPF_STATUS}, + {mmCP_CPC_BUSY_STAT}, + {mmCP_CPC_STALLED_STAT1}, + {mmCP_CPC_STATUS}, {mmGB_ADDR_CONFIG}, {mmMC_ARB_RAMCFG}, {mmGB_TILE_MODE0}, @@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) } /** - * cik_asic_reset - soft reset GPU + * cik_asic_pci_config_reset - soft reset GPU * * @adev: amdgpu_device pointer * - * Look up which blocks are hung and attempt - * to reset them. + * Use PCI Config method to reset the GPU. + * * Returns 0 for success. */ -static int cik_asic_reset(struct amdgpu_device *adev) +static int cik_asic_pci_config_reset(struct amdgpu_device *adev) { int r; @@ -1291,10 +1310,68 @@ static int cik_asic_reset(struct amdgpu_device *adev) return r; } +static bool cik_asic_supports_baco(struct amdgpu_device *adev) +{ + bool baco_support; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + smu7_asic_get_baco_capability(adev, &baco_support); + break; + default: + baco_support = false; + break; + } + + return baco_support; +} + static enum amd_reset_method cik_asic_reset_method(struct amdgpu_device *adev) { - return AMD_RESET_METHOD_LEGACY; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + /* disable baco reset until it works */ + /* smu7_asic_get_baco_capability(adev, &baco_reset); */ + baco_reset = false; + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_LEGACY; +} + +/** + * cik_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. + */ +static int cik_asic_reset(struct amdgpu_device *adev) +{ + int r; + + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + if (!adev->in_suspend) + amdgpu_inc_vram_lost(adev); + r = smu7_asic_baco_reset(adev); + } else { + r = cik_asic_pci_config_reset(adev); + } + + return r; } static u32 cik_get_config_memsize(struct amdgpu_device *adev) @@ -1384,7 +1461,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) static void cik_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -1419,12 +1495,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(adev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev)) return; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { @@ -1434,14 +1505,17 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL, + &gpu_cfg); tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16); tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL, + tmp16); tmp = 
RREG32_PCIE(ixPCIE_LC_STATUS1); max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> @@ -1465,15 +1539,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) for (i = 0; i < 10; i++) { /* check status */ - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(adev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; @@ -1486,26 +1568,45 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) msleep(100); /* linkctl */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, + tmp16); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(adev->pdev, + PCI_EXP_LNKCTL, + tmp16); /* linkctl2 */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(adev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; @@ -1520,15 +1621,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK; WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - tmp16 |= 3; /* gen3 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (adev->pm.pcie_gen_mask & 
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - tmp16 |= 2; /* gen2 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; /* gen1 */ - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK; @@ -1842,6 +1944,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_pcie_usage = &cik_get_pcie_usage, .need_reset_on_init = &cik_need_reset_on_init, .get_pcie_replay_count = &cik_get_pcie_replay_count, + .supports_baco = &cik_asic_supports_baco, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 54c625a2e570..9870bf27870e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev, int cik_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); +int smu7_asic_baco_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 645550e7caf5..40d2ac723dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; 
u32 tmp; int interlace = 0; @@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; u8 *sadb = NULL; @@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d9f470632b2c..898ef72d423c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device 
*adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -385,6 +387,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; int interlace = 0; @@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; u8 *sadb = NULL; @@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + 
drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 3eb2e7429269..db15a112becc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } - + drm_connector_list_iter_end(&iter); } /** @@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1124,20 +1129,24 @@ static void 
dce_v6_0_audio_select_pin(struct drm_encoder *encoder) static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; int interlace = 0; u32 tmp; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u8 *sadb = NULL; int sad_count; u32 tmp; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { u32 tmp 
= 0; @@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); int bpc = 8; @@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, if (!dig || !dig->afmt) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a16c5e9e610e..f06c9022c1fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp = 0, offset; @@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + 
drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 offset, tmp; u8 *sadb = NULL; @@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 offset; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index 844c03868248..d6221298b477 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev) { } +static void df_v1_7_sw_fini(struct amdgpu_device *adev) +{ +} + static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev, bool enable) { @@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev, const struct amdgpu_df_funcs df_v1_7_funcs = { .sw_init = df_v1_7_sw_init, + .sw_fini = 
df_v1_7_sw_fini, .enable_broadcast_mode = df_v1_7_enable_broadcast_mode, .get_fb_channel_number = df_v1_7_get_fb_channel_number, .get_hbm_channel_number = df_v1_7_get_hbm_channel_number, diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 5850c8e34caa..4043ebcea5de 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev, unsigned long flags, address, data; uint32_t ficadl_val, ficadh_val; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); @@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); @@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, lo_addr); @@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, lo_addr); @@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev) adev->df_perfmon_config_assign_mask[i] = 0; } +static void df_v3_6_sw_fini(struct amdgpu_device *adev) +{ + + device_remove_file(adev->dev, &dev_attr_df_cntr_avail); + +} + static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev, bool enable) { @@ -261,23 +268,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, { u32 tmp; - /* Put DF on broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, true); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } else { - tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); - tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - tmp |= DF_V3_6_MGCG_DISABLE; - WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); - } + if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if (enable) { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= 
DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, + mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } - /* Exit broadcast mode */ - adev->df_funcs->enable_broadcast_mode(adev, false); + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); + } } static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, @@ -537,6 +550,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, const struct amdgpu_df_funcs df_v3_6_funcs = { .sw_init = df_v3_6_sw_init, + .sw_fini = df_v3_6_sw_fini, .enable_broadcast_mode = df_v3_6_enable_broadcast_mode, .get_fb_channel_number = df_v3_6_get_fb_channel_number, .get_hbm_channel_number = df_v3_6_get_hbm_channel_number, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 957811b73672..98db25215d0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -50,9 +50,6 @@ * Navi10 has two graphic rings to share each graphic pipe. * 1. Primary ring * 2. Async ring - * - * In bring-up phase, it just used primary ring so set gfx ring number as 1 at - * first. */ #define GFX10_NUM_GFX_RINGS 2 #define GFX10_MEC_HPD_SIZE 2048 @@ -93,7 +90,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -117,17 +114,20 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000) }; static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = @@ -140,7 +140,7 @@ static const 
struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -162,16 +162,19 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000), }; static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = @@ -179,7 +182,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -564,6 +567,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev) kfree(adev->gfx.rlc.register_list_format); } +static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) +{ + adev->gfx.cp_fw_write_wait = false; + + switch (adev->asic_type) { + case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: + if ((adev->gfx.me_fw_version >= 0x00000046) && + (adev->gfx.me_feature_version >= 27) && + (adev->gfx.pfp_fw_version >= 0x00000068) && + (adev->gfx.pfp_feature_version >= 27) && + (adev->gfx.mec_fw_version >= 0x0000005b) && + (adev->gfx.mec_feature_version >= 27)) + adev->gfx.cp_fw_write_wait = true; + break; + default: + break; + } + + if (adev->gfx.cp_fw_write_wait == false) + DRM_WARN_ONCE("Warning: check cp_fw_version and update it to 
realize \ + GRBM requires 1-cycle delay in cp firmware\n"); +} + + static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev) { const struct rlc_firmware_header_v2_1 *rlc_hdr; @@ -585,11 +614,29 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev) le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); } +static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) +{ + bool ret = false; + + switch (adev->pdev->revision) { + case 0xc2: + case 0xc3: + ret = true; + break; + default: + ret = false; + break; + } + + return ret ; +} + static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_NAVI10: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; default: break; @@ -664,59 +711,61 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); - err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.rlc_fw); - rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; - version_major = le16_to_cpu(rlc_hdr->header.header_version_major); - version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); - if (version_major == 2 && version_minor == 1) - adev->gfx.rlc.is_rlc_v2_1 = true; - - adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); - adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); - adev->gfx.rlc.save_and_restore_offset = + if (!amdgpu_sriov_vf(adev)) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + version_major = le16_to_cpu(rlc_hdr->header.header_version_major); + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); + if (version_major == 2 && version_minor == 1) + adev->gfx.rlc.is_rlc_v2_1 = true; + + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); + adev->gfx.rlc.save_and_restore_offset = le32_to_cpu(rlc_hdr->save_and_restore_offset); - adev->gfx.rlc.clear_state_descriptor_offset = + adev->gfx.rlc.clear_state_descriptor_offset = le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); - adev->gfx.rlc.avail_scratch_ram_locations = + adev->gfx.rlc.avail_scratch_ram_locations = le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); - adev->gfx.rlc.reg_restore_list_size = + adev->gfx.rlc.reg_restore_list_size = le32_to_cpu(rlc_hdr->reg_restore_list_size); - adev->gfx.rlc.reg_list_format_start = + adev->gfx.rlc.reg_list_format_start = le32_to_cpu(rlc_hdr->reg_list_format_start); - adev->gfx.rlc.reg_list_format_separate_start = + adev->gfx.rlc.reg_list_format_separate_start = le32_to_cpu(rlc_hdr->reg_list_format_separate_start); - adev->gfx.rlc.starting_offsets_start = + adev->gfx.rlc.starting_offsets_start = le32_to_cpu(rlc_hdr->starting_offsets_start); - adev->gfx.rlc.reg_list_format_size_bytes = + adev->gfx.rlc.reg_list_format_size_bytes = le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); - 
adev->gfx.rlc.reg_list_size_bytes = + adev->gfx.rlc.reg_list_size_bytes = le32_to_cpu(rlc_hdr->reg_list_size_bytes); - adev->gfx.rlc.register_list_format = + adev->gfx.rlc.register_list_format = kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + - adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); - if (!adev->gfx.rlc.register_list_format) { - err = -ENOMEM; - goto out; - } + adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); + if (!adev->gfx.rlc.register_list_format) { + err = -ENOMEM; + goto out; + } - tmp = (unsigned int *)((uintptr_t)rlc_hdr + - le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) - adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); - adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; + adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; - tmp = (unsigned int *)((uintptr_t)rlc_hdr + - le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) - adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); - if (adev->gfx.rlc.is_rlc_v2_1) - gfx_v10_0_init_rlc_ext_microcode(adev); + if (adev->gfx.rlc.is_rlc_v2_1) + gfx_v10_0_init_rlc_ext_microcode(adev); + } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); @@ -832,6 +881,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) } } + gfx_v10_0_check_fw_write_wait(adev); out: if (err) { dev_err(adev->dev, @@ -966,39 +1016,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v10_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1442,7 +1459,7 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v10_0_pfp_fini(adev); @@ -1758,27 +1775,18 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); } -static void gfx_v10_0_init_csb(struct amdgpu_device *adev) +static int 
gfx_v10_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); + /* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); -} - -static void gfx_v10_0_init_pg(struct amdgpu_device *adev) -{ - int i; - - gfx_v10_0_init_csb(adev); - - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); - /* TODO: init power gating */ - return; + return 0; } void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) @@ -1873,18 +1881,16 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) { int r; - if (amdgpu_sriov_vf(adev)) - return 0; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); if (r) return r; - gfx_v10_0_init_pg(adev); - /* enable RLC SRM */ - gfx_v10_0_rlc_enable_srm(adev); + gfx_v10_0_init_csb(adev); + if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ + gfx_v10_0_rlc_enable_srm(adev); } else { adev->gfx.rlc.funcs->stop(adev); @@ -1906,7 +1912,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } - gfx_v10_0_init_pg(adev); + gfx_v10_0_init_csb(adev); + adev->gfx.rlc.funcs->start(adev); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { @@ -2373,7 +2380,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) return 0; } -static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { int i; u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); @@ -2386,7 +2393,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) adev->gfx.gfx_ring[i].sched.ready = false; } WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); - udelay(50); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("failed to %s cp gfx\n", enable ? 
"unhalt" : "halt"); + + return 0; } static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) @@ -2443,7 +2460,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -2513,7 +2530,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -2582,7 +2599,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -2757,7 +2774,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) /* Init gfx ring 0 for pipe 0 */ mutex_lock(&adev->srbm_mutex); gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0); - mutex_unlock(&adev->srbm_mutex); + /* Set ring buffer size */ ring = &adev->gfx.gfx_ring[0]; rb_bufsz = order_base_2(ring->ring_size / 8); @@ -2795,11 +2812,11 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1); gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); /* Init gfx ring 1 for pipe 1 */ mutex_lock(&adev->srbm_mutex); gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1); - mutex_unlock(&adev->srbm_mutex); ring = &adev->gfx.gfx_ring[1]; rb_bufsz = order_base_2(ring->ring_size / 8); tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); @@ -2829,6 +2846,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1); gfx_v10_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); /* Switch to pipe 0 */ mutex_lock(&adev->srbm_mutex); @@ -2903,7 +2921,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -3087,6 +3105,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v10_gfx_mqd *mqd = ring->mqd_ptr; + int mqd_idx = ring - &adev->gfx.gfx_ring[0]; if (!adev->in_gpu_reset && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); @@ -3098,14 +3117,15 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) #endif nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]) - memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd)); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else if (adev->in_gpu_reset) { /* reset mqd with the backup copy */ - if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]) - memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd)); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; + adev->wb.wb[ring->wptr_offs] = 0; 
amdgpu_ring_clear_ring(ring); #ifdef BRING_UP_DEBUG mutex_lock(&adev->srbm_mutex); @@ -3705,10 +3725,6 @@ static int gfx_v10_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = gfx_v10_0_csb_vram_pin(adev); - if (r) - return r; - if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev); @@ -3791,12 +3807,11 @@ static int gfx_v10_0_hw_fini(void *handle) if (amdgpu_gfx_disable_kcq(adev)) DRM_ERROR("KCQ disable failed\n"); if (amdgpu_sriov_vf(adev)) { - pr_debug("For SRIOV client, shouldn't do anything.\n"); + gfx_v10_0_cp_gfx_enable(adev, false); return 0; } gfx_v10_0_cp_enable(adev, false); gfx_v10_0_enable_gui_idle_interrupt(adev, false); - gfx_v10_0_csb_vram_unpin(adev); return 0; } @@ -4357,7 +4372,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask, reg_mem_engine; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { switch (ring->me) { @@ -4377,8 +4392,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, - adev->nbio_funcs->get_hdp_flush_req_offset(adev), - adev->nbio_funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), ref_and_mask, ref_and_mask, 0x20); } @@ -4765,6 +4780,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); } +static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + struct amdgpu_device *adev = ring->adev; + bool fw_version_ok = false; + + fw_version_ok = adev->gfx.cp_fw_write_wait; + + if (fw_version_ok) + gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, + ref, mask, 0x20); + else + amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, + ref, mask); +} + static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -5155,6 +5188,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_tmz = gfx_v10_0_ring_emit_tmz, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -5188,6 +5222,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { @@ -5218,6 +5253,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .emit_rreg = gfx_v10_0_ring_emit_rreg, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, }; static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5283,15 +5319,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) { - /* init asic gds 
info */ - switch (adev->asic_type) { - case CHIP_NAVI10: - default: - adev->gds.gds_size = 0x10000; - adev->gds.gds_compute_max_wave_id = 0x4ff; - break; - } + unsigned total_cu = adev->gfx.config.max_cu_per_sh * + adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines; + adev->gds.gds_size = 0x10000; + adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; adev->gds.gws_size = 64; adev->gds.oa_size = 16; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7f0a63628c43..31f44d05e606 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1576,7 +1576,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev) static void gfx_v6_0_constants_init(struct amdgpu_device *adev) { u32 gb_addr_config = 0; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; @@ -1678,7 +1678,6 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev) WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 791ba398f007..8f20a5dd44fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4258,7 +4258,7 @@ static int gfx_v7_0_late_init(void *handle) static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; - u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; @@ -4335,7 +4335,6 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) break; } - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; @@ -4554,6 +4553,8 @@ static int gfx_v7_0_hw_init(void *handle) gfx_v7_0_constants_init(adev); + /* init CSB */ + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* init rlc */ r = adev->gfx.rlc.funcs->resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 87dd55e9d72b..cfc1403fc855 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1710,7 +1677,7 @@ fail: static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; - 
u32 mc_shared_chmap, mc_arb_ramcfg; + u32 mc_arb_ramcfg; u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; u32 tmp; int ret; @@ -1850,7 +1817,6 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) break; } - mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; @@ -2103,7 +2069,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); @@ -3917,6 +3883,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v8_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32(mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -4837,10 +4804,6 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); - r = gfx_v8_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4958,8 +4921,6 @@ static int gfx_v8_0_hw_fini(void *handle) pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev); - gfx_v8_0_csb_vram_unpin(adev); - return 0; } @@ -6184,7 +6145,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. 
+ */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + amdgpu_ring_write(ring, lower_32_bits(seq - 1)); + amdgpu_ring_write(ring, upper_32_bits(seq - 1)); + + /* Then send the real EOP event down the pipe: + * EVENT_WRITE_EOP - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -6926,7 +6903,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 5 + /* COND_EXEC */ 7 + /* PIPELINE_SYNC */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ - 8 + /* FENCE for VM_FLUSH */ + 12 + /* FENCE for VM_FLUSH */ 20 + /* GDS switch */ 4 + /* double SWITCH_BUFFER, the first COND_EXEC jump to the place just @@ -6938,7 +6915,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 31 + /* DE_META */ 3 + /* CNTX_CTRL */ 5 + /* HDP_INVL */ - 8 + 8 + /* FENCE x2 */ + 12 + 12 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dcadc73bffd2..2616f1b59bbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -48,6 +48,8 @@ #include "amdgpu_ras.h" +#include "sdma0/sdma0_4_0_offset.h" +#include "sdma1/sdma1_4_0_offset.h" #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -517,9 +519,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = @@ -582,9 +584,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = @@ -676,9 +678,9 @@ static const struct soc15_reg_golden 
golden_settings_gc_9_2_1_vg12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = @@ -691,6 +693,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), }; static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = @@ -973,6 +977,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.me_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false; + if ((adev->gfx.mec_fw_version < 0x000001a5) || + (adev->gfx.mec_feature_version < 46) || + (adev->gfx.pfp_fw_version < 0x000000b7) || + (adev->gfx.pfp_feature_version < 46)) + DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ + GRBM requires 1-cycle delay in cp firmware\n"); + switch (adev->asic_type) { case CHIP_VEGA10: if ((adev->gfx.me_fw_version >= 0x0000009c) && @@ -1031,8 +1042,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - &&((adev->gfx.rlc_fw_version != 106 && + /* Disable GFXOFF on original raven. There are combinations + * of sbios and platforms that are not stable. 
+ */ + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) + &&((adev->gfx.rlc_fw_version != 106 && adev->gfx.rlc_fw_version < 531) || (adev->gfx.rlc_fw_version == 53815) || (adev->gfx.rlc_feature_version < 1) || @@ -1044,6 +1060,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; break; + case CHIP_RENOIR: + if (adev->pm.pp_feature & PP_GFXOFF_MASK) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; + break; default: break; } @@ -1324,7 +1346,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, /* TODO: Determine if MEC2 JT FW loading can be removed for all GFX V9 asic and above */ - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->asic_type != CHIP_ARCTURUS && + adev->asic_type != CHIP_RENOIR) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; info->fw = adev->gfx.mec2_fw; @@ -1662,39 +1685,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return 0; } -static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1956,190 +1946,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) return 0; } -static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, - struct amdgpu_ngg_buf *ngg_buf, - int size_se, - int default_size_se) -{ - int r; - - if (size_se < 0) { - dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se); - return -EINVAL; - } - size_se = size_se ? 
size_se : default_size_se; - - ngg_buf->size = size_se * adev->gfx.config.max_shader_engines; - r = amdgpu_bo_create_kernel(adev, ngg_buf->size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &ngg_buf->bo, - &ngg_buf->gpu_addr, - NULL); - if (r) { - dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r); - return r; - } - ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo); - - return r; -} - -static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < NGG_BUF_MAX; i++) - amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo, - &adev->gfx.ngg.buf[i].gpu_addr, - NULL); - - memset(&adev->gfx.ngg.buf[0], 0, - sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX); - - adev->gfx.ngg.init = false; - - return 0; -} - -static int gfx_v9_0_ngg_init(struct amdgpu_device *adev) -{ - int r; - - if (!amdgpu_ngg || adev->gfx.ngg.init == true) - return 0; - - /* GDS reserve memory: 64 bytes alignment */ - adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40); - adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size; - adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE); - adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); - - /* Primitive Buffer */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM], - amdgpu_prim_buf_per_se, - 64 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Primitive Buffer\n"); - goto err; - } - - /* Position Buffer */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS], - amdgpu_pos_buf_per_se, - 256 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Position Buffer\n"); - goto err; - } - - /* Control Sideband */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL], - amdgpu_cntl_sb_buf_per_se, - 256); - if (r) { - dev_err(adev->dev, "Failed to create Control Sideband Buffer\n"); - goto err; - } - - /* Parameter Cache, not created by default */ - if (amdgpu_param_buf_per_se <= 0) - goto out; - - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM], - amdgpu_param_buf_per_se, - 512 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Parameter Cache\n"); - goto err; - } - -out: - adev->gfx.ngg.init = true; - return 0; -err: - gfx_v9_0_ngg_fini(adev); - return r; -} - -static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; - int r; - u32 data, base; - - if (!amdgpu_ngg) - return 0; - - /* Program buffer size */ - data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, - adev->gfx.ngg.buf[NGG_PRIM].size >> 8); - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, - adev->gfx.ngg.buf[NGG_POS].size >> 8); - WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data); - - data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, - adev->gfx.ngg.buf[NGG_CNTL].size >> 8); - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, - adev->gfx.ngg.buf[NGG_PARAM].size >> 10); - WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data); - - /* Program buffer base address */ - base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); - data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); - data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data); - - base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); - data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data); - - base = 
upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); - data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data); - - base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); - data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); - data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data); - - /* Clear GDS reserved memory */ - r = amdgpu_ring_alloc(ring, 17); - if (r) { - DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n", - ring->name, r); - return r; - } - - gfx_v9_0_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), - (adev->gds.gds_size + - adev->gfx.ngg.gds_reserve_size)); - - amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5)); - amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC | - PACKET3_DMA_DATA_DST_SEL(1) | - PACKET3_DMA_DATA_SRC_SEL(2))); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT | - adev->gfx.ngg.gds_reserve_size); - - gfx_v9_0_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0); - - amdgpu_ring_commit(ring); - - return 0; -} - static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) { @@ -2307,10 +2113,6 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; - r = gfx_v9_0_ngg_init(adev); - if (r) - return r; - return 0; } @@ -2320,19 +2122,7 @@ static int gfx_v9_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && - adev->gfx.ras_if) { - struct ras_common_if *ras_if = adev->gfx.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } + amdgpu_gfx_ras_fini(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -2340,11 +2130,10 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); - gfx_v9_0_ngg_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, @@ -2583,6 +2372,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -2912,7 +2702,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. 
*/ if (adev->gfx.rlc.is_rlc_v2_1) { - gfx_v9_1_init_rlc_save_restore_list(adev); + if (adev->asic_type == CHIP_VEGA12 || + (adev->asic_type == CHIP_RAVEN && + adev->rev_id >= 8)) + gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); } @@ -3871,10 +3664,6 @@ static int gfx_v9_0_hw_init(void *handle) gfx_v9_0_constants_init(adev); - r = gfx_v9_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3883,12 +3672,6 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - if (adev->asic_type != CHIP_ARCTURUS) { - r = gfx_v9_0_ngg_en(adev); - if (r) - return r; - } - return r; } @@ -3930,8 +3713,10 @@ static int gfx_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - /* disable KCQ to avoid CPC touch memory not valid anymore */ - gfx_v9_0_kcq_disable(adev); + /* DF freeze and kcq disable will fail */ + if (!amdgpu_ras_intr_triggered()) + /* disable KCQ to avoid CPC touch memory not valid anymore */ + gfx_v9_0_kcq_disable(adev); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); @@ -3960,8 +3745,6 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev); - gfx_v9_0_csb_vram_unpin(adev); - return 0; } @@ -4067,9 +3850,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { + uint32_t tmp, lsb, msb, i = 0; + do { + if (i != 0) + udelay(1); + tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); + lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); + msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); + i++; + } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); + clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } mutex_unlock(&adev->gfx.gpu_clock_mutex); return clock; } @@ -4142,24 +3938,37 @@ static const struct soc15_reg_entry vgpr_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs, SGPRS=1 (16 SGPRs, BULKY=1 */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ }; -static const struct soc15_reg_entry sgpr_init_regs[] = { - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, - { 
SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */ - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 }, +static const struct soc15_reg_entry sgpr1_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, +}; + +static const struct soc15_reg_entry sgpr2_init_regs[] = { + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */ + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, }; @@ -4184,6 +3993,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16}, + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6}, { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16}, @@ -4196,6 +4006,9 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, + { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), 0, 1, 1}, + { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_EDC_COUNTER), 0, 1, 1}, + { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, }; static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) @@ -4203,6 +4016,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; int i, r; + /* only support when RAS is enabled */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return 0; + r = amdgpu_ring_alloc(ring, 7); if (r) { DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n", @@ -4261,7 +4078,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) total_size = ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4; total_size += - ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 
+ 5 + 2) * 4; + ((ARRAY_SIZE(sgpr1_init_regs) * 3) + 4 + 5 + 2) * 4; + total_size += + ((ARRAY_SIZE(sgpr2_init_regs) * 3) + 4 + 5 + 2) * 4; total_size = ALIGN(total_size, 256); vgpr_offset = total_size; total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); @@ -4304,7 +4123,35 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 128; /* x */ + ib.ptr[ib.length_dw++] = 0x40*2; /* x */ + ib.ptr[ib.length_dw++] = 1; /* y */ + ib.ptr[ib.length_dw++] = 1; /* z */ + ib.ptr[ib.length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* write CS partial flush packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); + ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); + + /* SGPR1 */ + /* write the register state for the compute dispatch */ + for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i++) { + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i]) + - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value; + } + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO) + - PACKET3_SET_SH_REG_START; + ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); + ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); + + /* write dispatch packet */ + ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4314,13 +4161,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); - /* SGPR */ + /* SGPR2 */ /* write the register state for the compute dispatch */ - for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) { + for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i++) { ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); - ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i]) + ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i]) - PACKET3_SET_SH_REG_START; - ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value; + ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value; } /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; @@ -4332,7 +4179,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) /* write dispatch packet */ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); - ib.ptr[ib.length_dw++] = 128; /* x */ + ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ ib.ptr[ib.length_dw++] = 1; /* y */ ib.ptr[ib.length_dw++] = 1; /* z */ ib.ptr[ib.length_dw++] = @@ -4393,34 +4240,11 @@ static int gfx_v9_0_early_init(void *handle) return 0; } -static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry); - static int gfx_v9_0_ecc_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = &adev->gfx.ras_if; - struct ras_ih_if ih_info = { - .cb = gfx_v9_0_process_ras_data_cb, - }; - struct ras_fs_if fs_info = { - 
.sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", - }; - struct ras_common_if ras_block = { - .block = AMDGPU_RAS_BLOCK__GFX, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "gfx", - }; int r; - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { - amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); - return 0; - } - r = gfx_v9_0_do_edc_gds_workarounds(adev); if (r) return r; @@ -4430,72 +4254,11 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__GFX); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. */ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__GFX); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info.head = **ras_if; - - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - - amdgpu_ras_debugfs_create(adev, &fs_info); - - r = amdgpu_ras_sysfs_create(adev, &fs_info); - if (r) - goto sysfs; -resume: - r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); + r = amdgpu_gfx_ras_late_init(adev); if (r) - goto irq; + return r; return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; } static int gfx_v9_0_late_init(void *handle) @@ -4560,16 +4323,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, { amdgpu_gfx_rlc_enter_safe_mode(adev); - if (is_support_sw_smu(adev) && !enable) - smu_set_gfx_cgpg(&adev->smu, enable); - if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) gfx_v9_0_enable_gfx_pipeline_powergating(adev, true); } else { gfx_v9_0_enable_gfx_cg_power_gating(adev, false); - gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } amdgpu_gfx_rlc_exit_safe_mode(adev); @@ -4838,8 +4599,6 @@ static int gfx_v9_0_set_powergating_state(void *handle, gfx_v9_0_enable_cp_power_gating(adev, false); /* update gfx cgpg state */ - if (is_support_sw_smu(adev) && enable) - smu_set_gfx_cgpg(&adev->smu, enable); gfx_v9_0_update_gfx_cg_power_gating(adev, enable); /* update mgcg state */ @@ -4970,7 +4729,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask, reg_mem_engine; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { switch (ring->me) { @@ -4990,8 +4749,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } 
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, - adev->nbio_funcs->get_hdp_flush_req_offset(adev), - adev->nbio_funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), ref_and_mask, ref_and_mask, 0x20); } @@ -5723,313 +5482,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } -static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry) -{ - /* TODO ue will trigger an interrupt. */ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->gfx.funcs->query_ras_error_count) - adev->gfx.funcs->query_ras_error_count(adev, err_data); - amdgpu_ras_reset_gpu(adev, 0); - return AMDGPU_RAS_SUCCESS; -} - -static const struct { - const char *name; - uint32_t ip; - uint32_t inst; - uint32_t seg; - uint32_t reg_offset; - uint32_t per_se_instance; - int32_t num_instance; - uint32_t sec_count_mask; - uint32_t ded_count_mask; -} gfx_ras_edc_regs[] = { - { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, - REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT), - REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, - { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, - REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT), - REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) }, - { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 }, - { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 }, - { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT), - REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) }, - { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 }, - { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), - REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) }, - { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT), - REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) }, - { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 }, - { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 }, - { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 }, - { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) }, - { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 }, + +static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = { + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) + }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) + }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1), + 0, 0 + }, + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2), + 0, 0 + }, + { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, 
mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) + }, + { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT), + 0, 0 + }, + { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT) + }, + { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), + SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT) + }, + { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1), + 0, 0 + }, + { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1), + 0, 0 + }, + { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1), + 0, 0 + }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) + }, + { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), + 0, 0 + }, { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), - 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) + }, { "GDS_OA_PHY_PHY_CMD_RAM_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) + }, { "GDS_OA_PHY_PHY_DATA_RAM_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), + 0, 0 + }, { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), - 
REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, - { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1, - REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 }, - { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, - { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 }, - { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 }, - { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 }, - { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 }, - { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, - REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 }, - { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, - REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 }, - { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, - { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, - { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, - { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, - { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, - { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 }, - { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 }, - { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 }, - { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 }, - { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 }, - { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 }, - { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 }, - { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 }, - { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) + }, + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), + 0, 0 + }, + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + 
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) + }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), + 0, 0 + }, + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), + 0, 0 + }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) + }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) + }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) + }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) + }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) + }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), + 0, 0 + }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), + 0, 0 + }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), + 0, 0 + }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), + 0, 0 + }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), + 0, 0 + }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), + 0, 0 + }, + { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), + 0, 0 + }, { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), - 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), - 0 }, - { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 }, + SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), + 0, 0 + }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), + 0, 0 + }, { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), - 0, 16, 
REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), - 0 }, - { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 }, - { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72, - REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 }, - { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, - { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, - { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 }, - { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 }, - { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 }, - { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, - { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, - { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, - { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, - { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 }, - { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) }, - { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) }, - { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) }, - { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) }, - { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) }, - { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) }, - { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), + 0, 0 + }, + { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), + 0, 0 + }, + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), + 0, 0 + }, + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, 
CACHE_RAM_DED_COUNT) + }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) + }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), + 0, 0 + }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + 0, 0 + }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), + 0, 0 + }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) + }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) + }, + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) + }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) + }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) + }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) + }, + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) + }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) + }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) + }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) + }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) + }, { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) + }, { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 
6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) + }, { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, - { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, - { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, - { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, - { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, - { "SQC_INST_BANKA_UTCL1_MISS_FIFO", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), - 0 }, - { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_DIRTY_BIT_RAM", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 }, - { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, - { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, - { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, - { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, - { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, - { "SQC_INST_BANKB_UTCL1_MISS_FIFO", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 
6, - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), - 0 }, - { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_DIRTY_BIT_RAM", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 }, - { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, - { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, - { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, - { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, - { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, - { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 }, - { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 }, - { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 }, - { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 }, - { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 }, - { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, - { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, - { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, - { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 }, - { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 }, - { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 }, - { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 }, - { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 }, - { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, 
MAM_D3MEM_SED_COUNT), 0 }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) + }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) + }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) + }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), + 0, 0 + }, + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) + }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) + }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) + }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { 
"SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), + 0, 0 + }, + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) + }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) + }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) + }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) + }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) + }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0 + }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0 + }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0 + }, + { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) + }, + { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) + }, + { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) + }, + { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + 0, 0 + } }; static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, @@ -6078,14 +5970,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, return ret; } +static const char *vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + 
"UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + "UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", +}; + +static const char *vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_LOG_FIFO", +}; + +static const char *atc_l2_cache_2m_mems[] = { + "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char *atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); + + for (i = 0; i < 16; i++) { + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); + + sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < 7; i++) { + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); + + sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], 
sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < 4; i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); + + sec_count = (data & 0x00006000L) >> 0xd; + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + } + + for (i = 0; i < 32; i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); + + sec_count = (data & 0x00006000L) >> 0xd; + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = (data & 0x00018000L) >> 0xf; + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + + return 0; +} + +static int __get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, uint32_t value, + uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) { + if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset || + gc_ras_fields_vg20[i].seg != reg->seg || + gc_ras_fields_vg20[i].inst != reg->inst) + continue; + + sec_cnt = (value & + gc_ras_fields_vg20[i].sec_count_mask) >> + gc_ras_fields_vg20[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + gc_ras_fields_vg20[i].name, + se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + gc_ras_fields_vg20[i].ded_count_mask) >> + gc_ras_fields_vg20[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + gc_ras_fields_vg20[i].name, + se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t sec_count, ded_count; - uint32_t i; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; uint32_t reg_value; - uint32_t se_id, instance_id; if (adev->asic_type != CHIP_VEGA20) return -EINVAL; @@ -6094,71 +6189,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count = 0; mutex_lock(&adev->grbm_idx_mutex); - for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) { - for (instance_id = 0; instance_id < 256; instance_id++) { - for (i = 0; - i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]); - i++) { - if (se_id != 0 && - !gfx_ras_edc_regs[i].per_se_instance) - continue; - if (instance_id >= gfx_ras_edc_regs[i].num_instance) - continue; - - gfx_v9_0_select_se_sh(adev, se_id, 0, - instance_id); - - reg_value = RREG32( - adev->reg_offset[gfx_ras_edc_regs[i].ip] - [gfx_ras_edc_regs[i].inst] - [gfx_ras_edc_regs[i].seg] + - gfx_ras_edc_regs[i].reg_offset); - sec_count = reg_value & - 
gfx_ras_edc_regs[i].sec_count_mask; - ded_count = reg_value & - gfx_ras_edc_regs[i].ded_count_mask; - if (sec_count) { - DRM_INFO( - "Instance[%d][%d]: SubBlock %s, SEC %d\n", - se_id, instance_id, - gfx_ras_edc_regs[i].name, - sec_count); - err_data->ce_count++; - } - if (ded_count) { - DRM_INFO( - "Instance[%d][%d]: SubBlock %s, DED %d\n", - se_id, instance_id, - gfx_ras_edc_regs[i].name, - ded_count); - err_data->ue_count++; - } + for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { + for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { + for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { + gfx_v9_0_select_se_sh(adev, j, 0, k); + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); + if (reg_value) + __get_ras_error_count(&sec_ded_counter_registers[i], + j, k, reg_value, + &sec_count, &ded_count); } } } - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - return 0; -} + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; -static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->gfx.ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); - ih_data.head = *ras_if; + gfx_v9_0_query_utc_edc_status(adev, err_data); - DRM_ERROR("CP ECC ERROR IRQ\n"); - amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -6325,7 +6378,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = { .set = gfx_v9_0_set_cp_ecc_error_state, - .process = gfx_v9_0_cp_ecc_error_irq, + .process = amdgpu_gfx_cp_ecc_error_irq, };
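The reworked gfx_v9_0_query_ras_error_count() above walks a table of SEC/DED counter registers and lets __get_ras_error_count() decode each value by mask and shift. A minimal standalone sketch of that decode step follows; the struct layout and mask/shift values are hypothetical stand-ins for gc_ras_fields_vg20[] (the 13/15-bit positions mirror the hard-coded ATC L2 decode in the patch), and only the arithmetic is taken from the code above:

	#include <stdint.h>
	#include <stdio.h>

	struct edc_field {
		const char *name;
		uint32_t sec_count_mask, sec_count_shift; /* single-error-corrected */
		uint32_t ded_count_mask, ded_count_shift; /* double-error-detected */
	};

	static void decode_edc(const struct edc_field *f, uint32_t reg_value,
			       uint32_t *sec_total, uint32_t *ded_total)
	{
		uint32_t sec = (reg_value & f->sec_count_mask) >> f->sec_count_shift;
		uint32_t ded = (reg_value & f->ded_count_mask) >> f->ded_count_shift;

		if (sec)
			printf("GFX SubBlock %s, SEC %u\n", f->name, sec);
		if (ded)
			printf("GFX SubBlock %s, DED %u\n", f->name, ded);

		*sec_total += sec; /* feeds err_data->ce_count */
		*ded_total += ded; /* feeds err_data->ue_count */
	}

	int main(void)
	{
		/* hypothetical field: SEC in bits 13-14, DED in bits 15-16 */
		struct edc_field f = { "EXAMPLE_MEM", 0x00006000, 13, 0x00018000, 15 };
		uint32_t sec_total = 0, ded_total = 0;

		decode_edc(&f, 0x0000a000, &sec_total, &ded_total); /* SEC 1, DED 1 */
		return 0;
	}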
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 6ce37ce77d14..e91bd7945777 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp); } @@ -365,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index 5e9ab8eb214a..c0ab71df0d90 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -33,16 +33,31 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); u32 max_region = REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); + u32 max_num_physical_nodes = 0; + u32 max_physical_node_id = 0; + + switch (adev->asic_type) { + case CHIP_VEGA20: + max_num_physical_nodes = 4; + max_physical_node_id = 3; + break; + case CHIP_ARCTURUS: + max_num_physical_nodes = 8; + max_physical_node_id = 7; + break; + default: + return -EINVAL; + } /* PF_MAX_REGION=0 means xgmi is disabled */ if (max_region) { adev->gmc.xgmi.num_physical_nodes = max_region + 1; - if (adev->gmc.xgmi.num_physical_nodes > 4) + if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) return -EINVAL; adev->gmc.xgmi.physical_node_id = REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION); - if (adev->gmc.xgmi.physical_node_id > 3) + if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) return -EINVAL; adev->gmc.xgmi.node_segment_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 8b789f750b72..b70c7b483c24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24; } -static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev) +void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two-register stride between the mmGCVM_CONTEXT0_* and mmGCVM_CONTEXT1_* registers */ + int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); - - WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - gfxhub_v2_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); @@ -151,6 +155,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp); tmp = mmGCVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp); tmp = mmGCVM_L2_CNTL4_DEFAULT; @@ -166,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp); } @@ -341,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack =
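The new gfxhub_v2_0_setup_vm_pt_regs() above derives a per-VMID register offset from the spacing between the CONTEXT0 and CONTEXT1 page-table base registers, so a single helper can program any VM context. A minimal sketch of that addressing arithmetic, using made-up register indices (the real values come from the generated GC register headers):

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical register indices; each VM context owns one LO32/HI32 pair */
	#define CTX0_PT_BASE_LO32 0x0b00
	#define CTX1_PT_BASE_LO32 0x0b02

	static uint32_t ctx_pt_base_lo32(uint32_t vmid)
	{
		/* consecutive contexts sit (CTX1 - CTX0) registers apart */
		uint32_t stride = CTX1_PT_BASE_LO32 - CTX0_PT_BASE_LO32; /* == 2 */

		return CTX0_PT_BASE_LO32 + stride * vmid;
	}

	int main(void)
	{
		printf("0x%04x\n", ctx_pt_base_lo32(3)); /* e.g. VMID 3 -> 0x0b06 */
		return 0;
	}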
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h index 06807940748b..392b8cd94fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h @@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v2_0_init(struct amdgpu_device *adev); u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev); +void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 241a4e57cf4a..f5725336a5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -219,6 +219,21 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid, return req; } +/** + * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore + * + * @adev: amdgpu_device pointer + * @vmhub: vmhub type + * + */ +static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, + uint32_t vmhub) +{ + return ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!amdgpu_sriov_vf(adev))); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -229,12 +244,35 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid, static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int vmhub, uint32_t flush_type) { + bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; + spin_lock(&adev->gmc.invalidate_lock); + /* + * The GPUVM invalidate acknowledge state may be lost across a + * power-gating off cycle; add a semaphore acquire before the invalidation + * and a semaphore release after it to avoid entering a power-gated state, + * as a workaround for the issue + */ + + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */ + if (use_semaphore) { + for (i = 0; i < adev->usec_timeout; i++) { + /* a read return value of 1 means semaphore acquire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* @@ -254,6 +292,16 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, udelay(1); } + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */ + if (use_semaphore) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + + spin_unlock(&adev->gmc.invalidate_lock); + if (i < adev->usec_timeout) return;
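The two hunks above bracket the VM invalidation with a semaphore acquire (poll until the register reads 1) and a release (write 0), so the hub cannot be power-gated in the middle of an invalidation. Reduced to its control flow, and with stub register accessors standing in for RREG32_NO_KIQ/WREG32_NO_KIQ, the pattern looks roughly like this (a sketch, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define USEC_TIMEOUT 1000

	enum { VM_INV_ENG0_SEM, VM_INV_ENG0_REQ, VM_INV_ENG0_ACK };

	/* stubs; a real driver performs MMIO accesses here */
	static uint32_t reg_read(int reg) { (void)reg; return 1; }
	static void reg_write(int reg, uint32_t val) { printf("reg %d <- 0x%x\n", reg, val); }
	static void udelay(unsigned int us) { (void)us; }

	static void flush_vm_hub_sketch(uint32_t req)
	{
		unsigned int i;

		/* acquire: a read return value of 1 means the semaphore is held */
		for (i = 0; i < USEC_TIMEOUT; i++) {
			if (reg_read(VM_INV_ENG0_SEM) & 0x1)
				break;
			udelay(1);
		}
		if (i >= USEC_TIMEOUT)
			fprintf(stderr, "Timeout waiting for sem acquire in VM flush!\n");

		reg_write(VM_INV_ENG0_REQ, req);  /* kick the invalidation */
		/* ... poll VM_INV_ENG0_ACK for bit (1 << vmid), elided ... */

		reg_write(VM_INV_ENG0_SEM, 0);    /* writing 0 releases the semaphore */
	}

	int main(void)
	{
		flush_vm_hub_sketch(0x1);
		return 0;
	}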
@@ -278,7 +326,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, int r; /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); mutex_lock(&adev->mman.gtt_window_lock); @@ -292,7 +340,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || - adev->in_gpu_reset) { + adev->in_gpu_reset || + ring->sched.ready == false) { gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); mutex_unlock(&adev->mman.gtt_window_lock); return; @@ -309,6 +358,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); job->vm_needs_flush = true; + job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; amdgpu_ring_pad_ib(ring, &job->ibs[0]); r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); @@ -333,21 +383,41 @@ error_alloc: static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { + bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * The GPUVM invalidate acknowledge state may be lost across a + * power-gating off cycle; add a semaphore acquire before the invalidation + * and a semaphore release after it to avoid entering a power-gated state, + * as a workaround for the issue + */ + + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */ + if (use_semaphore) + /* a read return value of 1 means semaphore acquire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), upper_32_bits(pd_addr)); - amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req); + amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng, + hub->vm_inv_eng0_ack + eng, + req, 1 << vmid); - /* wait for the invalidate to complete */ - amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng, - 1 << vmid, 1 << vmid); + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well.
*/ + if (use_semaphore) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); return pd_addr; } @@ -397,43 +467,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid * 1 system * 0 valid */ -static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - switch (flags & AMDGPU_VM_MTYPE_MASK) { +static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) +{ + switch (flags) { case AMDGPU_VM_MTYPE_DEFAULT: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); case AMDGPU_VM_MTYPE_NC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); case AMDGPU_VM_MTYPE_WC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); case AMDGPU_VM_MTYPE_CC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); case AMDGPU_VM_MTYPE_UC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); default: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); } - - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; } static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, @@ -460,12 +510,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, } } +static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; + *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags |= AMDGPU_PTE_SNOOPED; + *flags |= AMDGPU_PTE_LOG; + *flags |= AMDGPU_PTE_SYSTEM; + *flags &= ~AMDGPU_PTE_VALID; + } +} + static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, - .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags, - .get_vm_pde = gmc_v10_0_get_vm_pde + .map_mtype = gmc_v10_0_map_mtype, + .get_vm_pde = gmc_v10_0_get_vm_pde, + .get_vm_pte = gmc_v10_0_get_vm_pte }; static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -519,8 +589,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, { u64 base = 0; - if (!amdgpu_sriov_vf(adev)) - base = gfxhub_v2_0_get_fb_location(adev); + base = gfxhub_v2_0_get_fb_location(adev); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); @@ -540,24 +609,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) { - int chansize, numchan; - - if (!amdgpu_emu_mode) - adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); - else { - /* hard code vram_width for emulation */ - chansize = 128; - numchan = 1; - adev->gmc.vram_width = numchan * chansize; - } - /* Could aper size report 0 ? 
*/ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->gmc.mc_vram_size = - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; adev->gmc.visible_vram_size = adev->gmc.aper_size; @@ -636,7 +694,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) static int gmc_v10_0_sw_init(void *handle) { - int r; + int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v2_0_init(adev); @@ -644,7 +702,15 @@ static int gmc_v10_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (!amdgpu_emu_mode) + adev->gmc.vram_width = vram_width; + else + adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: @@ -794,7 +860,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); /* Flush HDP after it is initialized */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9fb1765e92d1..b205039350b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { @@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb, .set_prt = gmc_v6_0_set_prt, .get_vm_pde = gmc_v6_0_get_vm_pde, - .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags + .get_vm_pte = gmc_v6_0_get_vm_pte, }; static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0c3d9bc3a641..f08e5330642d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); } -static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= 
AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, .set_prt = gmc_v7_0_set_prt, - .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags, - .get_vm_pde = gmc_v7_0_get_vm_pde + .get_vm_pde = gmc_v7_0_get_vm_pde, + .get_vm_pte = gmc_v7_0_get_vm_pte }; static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index ea764dd9245d..6d96d40fbcb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, * 0 valid */ -static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, .set_prt = gmc_v8_0_set_prt, - .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags, - .get_vm_pde = gmc_v8_0_get_vm_pde + .get_vm_pde = gmc_v8_0_get_vm_pde, + .get_vm_pte = gmc_v8_0_get_vm_pte }; static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f91337030dc0..fa025ceeea0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -51,10 +51,12 @@ #include "gfxhub_v1_1.h" #include "mmhub_v9_4.h" #include "umc_v6_1.h" +#include "umc_v6_0.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" /* add these here since we already include dce12 headers and these are for DCN */ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d @@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, return 0; } -static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry) -{ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if 
(adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, err_data); - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - if (adev->umc.funcs->query_ras_error_address) - adev->umc.funcs->query_ras_error_address(adev, err_data); - - /* only uncorrectable error needs gpu reset */ - if (err_data->ue_count) - amdgpu_ras_reset_gpu(adev, 0); - - return AMDGPU_RAS_SUCCESS; -} - -static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->gmc.umc_ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; - - ih_data.head = *ras_if; - - amdgpu_ras_interrupt_dispatch(adev, &ih_data); - return 0; -} - static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, } /* If it's the first fault for this address, process it normally */ + if (retry_fault && !in_interrupt() && + amdgpu_vm_handle_fault(adev, entry->pasid, addr)) + return 1; /* This also prevents sending it to KFD */ + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to @@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = { static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = { .set = gmc_v9_0_ecc_interrupt_state, - .process = gmc_v9_0_process_ecc_irq, + .process = amdgpu_umc_process_ecc_irq, }; static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) @@ -448,6 +416,24 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, return req; } +/** + * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore + * + * @adev: amdgpu_device pointer + * @vmhub: vmhub type + * + */ +static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, + uint32_t vmhub) +{ + return ((vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) && + (!amdgpu_sriov_vf(adev)) && + (!(adev->asic_type == CHIP_RAVEN && + adev->rev_id < 0x8 && + adev->pdev->device == 0x15d8))); +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -467,6 +453,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; u32 j, tmp; struct amdgpu_vmhub *hub; @@ -491,6 +478,28 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } spin_lock(&adev->gmc.invalidate_lock); + + /* + * The GPUVM invalidate acknowledge state may be lost across a + * power-gating off cycle; add a semaphore acquire before the invalidation + * and a semaphore release after it to avoid entering a power-gated state, + * as a workaround for the issue + */ + + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well.
*/ + if (use_semaphore) { + for (j = 0; j < adev->usec_timeout; j++) { + /* a read return value of 1 means semaphore acquire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (j >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* @@ -506,7 +515,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, break; udelay(1); } + + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */ + if (use_semaphore) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + spin_unlock(&adev->gmc.invalidate_lock); + if (j < adev->usec_timeout) return; @@ -516,11 +535,25 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) { + bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * The GPUVM invalidate acknowledge state may be lost across a + * power-gating off cycle; add a semaphore acquire before the invalidation + * and a semaphore release after it to avoid entering a power-gated state, + * as a workaround for the issue + */ + + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well. */ + if (use_semaphore) + /* a read return value of 1 means semaphore acquire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -531,6 +564,14 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: the semaphore still needs debugging before it can be used for GFXHUB as well.
*/ + if (use_semaphore) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } @@ -584,44 +625,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, * 0 valid */ -static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) +static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) { - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - - switch (flags & AMDGPU_VM_MTYPE_MASK) { + switch (flags) { case AMDGPU_VM_MTYPE_DEFAULT: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); case AMDGPU_VM_MTYPE_NC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); case AMDGPU_VM_MTYPE_WC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); + case AMDGPU_VM_MTYPE_RW: + return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW); case AMDGPU_VM_MTYPE_CC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); case AMDGPU_VM_MTYPE_UC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); default: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); } - - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; } static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, @@ -648,12 +670,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, } } +static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; + *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK; + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags &= ~AMDGPU_PTE_VALID; + } + + if (adev->asic_type == CHIP_ARCTURUS && + !(*flags & AMDGPU_PTE_SYSTEM) && + mapping->bo_va->is_xgmi) + *flags |= AMDGPU_PTE_SNOOPED; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, - .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, - .get_vm_pde = gmc_v9_0_get_vm_pde + .map_mtype = gmc_v9_0_map_mtype, + .get_vm_pde = gmc_v9_0_get_vm_pde, + .get_vm_pte = gmc_v9_0_get_vm_pte }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -664,11 +708,22 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_VEGA10: + adev->umc.funcs = &umc_v6_0_funcs; + break; case CHIP_VEGA20: adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; - adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; + adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; + adev->umc.funcs = &umc_v6_1_funcs; + break; + 
case CHIP_ARCTURUS: + adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; + adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; + adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.funcs = &umc_v6_1_funcs; break; @@ -681,7 +736,10 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA20: - adev->mmhub_funcs = &mmhub_v1_0_funcs; + adev->mmhub.funcs = &mmhub_v1_0_funcs; + break; + case CHIP_ARCTURUS: + adev->mmhub.funcs = &mmhub_v9_4_funcs; break; default: break; @@ -762,140 +820,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) return 0; } -static int gmc_v9_0_ecc_ras_block_late_init(void *handle, - struct ras_fs_if *fs_info, struct ras_common_if *ras_block) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = NULL; - struct ras_ih_if ih_info = { - .cb = gmc_v9_0_process_ras_data_cb, - }; - int r; - - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) - ras_if = &adev->gmc.umc_ras_if; - else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB) - ras_if = &adev->gmc.mmhub_ras_if; - else - BUG(); - - if (!amdgpu_ras_is_supported(adev, ras_block->block)) { - amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); - return 0; - } - - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. 
*/ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = *ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info->head = **ras_if; - - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) { - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - } - - amdgpu_ras_debugfs_create(adev, fs_info); - - r = amdgpu_ras_sysfs_create(adev, fs_info); - if (r) - goto sysfs; -resume: - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) { - r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); - if (r) - goto irq; - } - - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; -} - -static int gmc_v9_0_ecc_late_init(void *handle) -{ - int r; - - struct ras_fs_if umc_fs_info = { - .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", - }; - struct ras_common_if umc_ras_block = { - .block = AMDGPU_RAS_BLOCK__UMC, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "umc", - }; - struct ras_fs_if mmhub_fs_info = { - .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", - }; - struct ras_common_if mmhub_ras_block = { - .block = AMDGPU_RAS_BLOCK__MMHUB, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "mmhub", - }; - - r = gmc_v9_0_ecc_ras_block_late_init(handle, - &umc_fs_info, &umc_ras_block); - if (r) - return r; - - r = gmc_v9_0_ecc_ras_block_late_init(handle, - &mmhub_fs_info, &mmhub_ras_block); - return r; -} - static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool r; + int r; if (!gmc_v9_0_keep_stolen_memory(adev)) amdgpu_bo_late_init(adev); @@ -908,6 +836,7 @@ static int gmc_v9_0_late_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA20: + case CHIP_ARCTURUS: r = amdgpu_atomfirmware_mem_ecc_supported(adev); if (!r) { DRM_INFO("ECC is not present.\n"); @@ -929,7 +858,7 @@ static int gmc_v9_0_late_init(void *handle) } } - r = gmc_v9_0_ecc_late_init(handle); + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; @@ -970,33 +899,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) { - int chansize, numchan; int r; - if (amdgpu_sriov_vf(adev)) { - /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, - * and DF related registers is not readable, seems hardcord is the - * only way to set the correct vram_width - */ - adev->gmc.vram_width = 2048; - } else if (amdgpu_emu_mode != 1) { - adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); - } - - if (!adev->gmc.vram_width) { - /* hbm memory channel size */ - if (adev->flags & AMD_IS_APU) - chansize = 64; - else - chansize = 128; - - numchan = adev->df_funcs->get_hbm_channel_number(adev); - adev->gmc.vram_width = numchan * chansize; - } - /* size in MB on si */ adev->gmc.mc_vram_size = - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; 
if (!(adev->flags & AMD_IS_APU)) { @@ -1108,7 +1015,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) static int gmc_v9_0_sw_init(void *handle) { - int r; + int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v1_0_init(adev); @@ -1119,7 +1026,32 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (amdgpu_sriov_vf(adev)) + /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN, + * and the DF-related registers are not readable; hardcoding seems to be + * the only way to set the correct vram_width + */ + adev->gmc.vram_width = 2048; + else if (amdgpu_emu_mode != 1) + adev->gmc.vram_width = vram_width; + + if (!adev->gmc.vram_width) { + int chansize, numchan; + + /* hbm memory channel size */ + if (adev->flags & AMD_IS_APU) + chansize = 64; + else + chansize = 128; + + numchan = adev->df_funcs->get_hbm_channel_number(adev); + adev->gmc.vram_width = numchan * chansize; + } + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; switch (adev->asic_type) { case CHIP_RAVEN: adev->num_vmhubs = 2; @@ -1240,33 +1172,7 @@ static int gmc_v9_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; void *stolen_vga_buf; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && - adev->gmc.umc_ras_if) { - struct ras_common_if *ras_if = adev->gmc.umc_ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - /* remove fs first */ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - /* remove the IH */ - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } - - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && - adev->gmc.mmhub_ras_if) { - struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if; - - /* remove fs and disable ras feature */ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } - + amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1316,13 +1222,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) */ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { - int r, i; - bool value; - u32 tmp; - - amdgpu_device_program_register_sequence(adev, - golden_settings_vega10_hdp, - ARRAY_SIZE(golden_settings_vega10_hdp)); + int r; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -1332,15 +1232,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - switch (adev->asic_type) { - case CHIP_RAVEN: - /* TODO for renoir */ - mmhub_v1_0_update_power_gating(adev, true); - break; - default: - break; - } - r = gfxhub_v1_0_gart_enable(adev); if (r) return r; @@ -1352,6 +1243,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; + DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", + (unsigned)(adev->gmc.gart_size >> 20), + (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); + adev->gart.ready = true; + return 0; +} + +static int gmc_v9_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool value; + int r, i; + u32 tmp; + + /* The sequence of
these two function calls matters.*/ + gmc_v9_0_init_golden_registers(adev); + + if (adev->mode_info.num_crtc) { + if (adev->asic_type != CHIP_ARCTURUS) { + /* Lockout access through VGA aperture*/ + WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + + /* disable VGA render */ + WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + } + } + + amdgpu_device_program_register_sequence(adev, + golden_settings_vega10_hdp, + ARRAY_SIZE(golden_settings_vega10_hdp)); + + switch (adev->asic_type) { + case CHIP_RAVEN: + /* TODO for renoir */ + mmhub_v1_0_update_power_gating(adev, true); + break; + case CHIP_ARCTURUS: + WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); + break; + default: + break; + } + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); @@ -1361,7 +1295,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); /* After HDP is initialized, flush HDP.*/ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; @@ -1377,28 +1311,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) for (i = 0; i < adev->num_vmhubs; ++i) gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); - adev->gart.ready = true; - return 0; -} - -static int gmc_v9_0_hw_init(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* The sequence of these two function calls matters.*/ - gmc_v9_0_init_golden_registers(adev); - - if (adev->mode_info.num_crtc) { - /* Lockout access through VGA aperture*/ - WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - - /* disable VGA render */ - WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - } + if (adev->umc.funcs && adev->umc.funcs->init_registers) + adev->umc.funcs->init_registers(adev); r = gmc_v9_0_gart_enable(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index 971c0840358f..49e8be761214 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -36,12 +36,4 @@ extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; - -/* amdgpu_amdkfd*.c */ -void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t value); -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t value); -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, - uint32_t vmid, uint64_t value); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c new file mode 100644 index 000000000000..a141408dfb23 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -0,0 +1,585 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "soc15.h" +#include "soc15d.h" + +#include "vcn/vcn_1_0_offset.h" +#include "vcn/vcn_1_0_sh_mask.h" + +static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev); + +static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) +{ + struct amdgpu_device *adev = ring->adev; + ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + ring->ring[(*ptr)++] = 0; + ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); + } else { + ring->ring[(*ptr)++] = reg_offset; + ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); + } + ring->ring[(*ptr)++] = val; +} + +static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) +{ + struct amdgpu_device *adev = ring->adev; + + uint32_t reg, reg_offset, val, mask, i; + + // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW); + reg_offset = (reg << 2); + val = lower_32_bits(ring->gpu_addr); + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH); + reg_offset = (reg << 2); + val = upper_32_bits(ring->gpu_addr); + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 3rd to 5th: issue MEM_READ commands + for (i = 0; i <= 2; i++) { + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2); + ring->ring[ptr++] = 0; + } + + // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x13; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 7th: program mmUVD_JRBC_RB_REF_DATA + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA); + reg_offset = (reg << 2); + val = 0x1; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x1; + mask = 0x1; + + 
ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0); + ring->ring[ptr++] = 0x01400200; + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0); + ring->ring[ptr++] = val; + ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + ring->ring[ptr++] = 0; + ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3); + } else { + ring->ring[ptr++] = reg_offset; + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3); + } + ring->ring[ptr++] = mask; + + //9th to 21st: insert no-op + for (i = 0; i <= 12; i++) { + ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); + ring->ring[ptr++] = 0; + } + + //22nd: reset mmUVD_JRBC_RB_RPTR + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR); + reg_offset = (reg << 2); + val = 0; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); + + //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch + reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL); + reg_offset = (reg << 2); + val = 0x12; + jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val); +} + +/** + * jpeg_v1_0_decode_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v1_0_decode_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v1_0_decode_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); +} + +/** + * jpeg_v1_0_decode_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. + */ +static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80010000); +} + +/** + * jpeg_v1_0_decode_ring_insert_end - insert an end command + * + * @ring: amdgpu_ring pointer + * + * Write an end command to the ring. + */ +static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x68e04); + + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x00010000); +}
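A recurring detail in this file is the pair of byte-offset windows, 0x1f800-0x21fff and 0x1e000-0x1e1ff, that jpeg_v1_0_decode_ring_patch_wreg() above and the emit helpers below all special-case: registers inside the windows appear to have their index encoded directly in the PACKETJ header, while other registers are emitted as a data word after programming UVD_JRBC_EXTERNAL_REG_BASE. The predicate could be factored out as below; this helper is purely illustrative and not part of the patch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* illustrative helper, not part of the patch */
	static bool jpeg_reg_in_direct_window(uint32_t reg_offset)
	{
		return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
		       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
	}

	int main(void)
	{
		printf("%d %d\n", jpeg_reg_in_direct_window(0x1f900),  /* 1 */
				  jpeg_reg_in_direct_window(0x00100)); /* 0 */
		return 0;
	}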
+ +/** + * jpeg_v1_0_decode_ring_emit_fence - emit a fence & trap command + * + * @ring: amdgpu_ring pointer + * @addr: address the fence sequence number is written to + * @seq: sequence number to emit + * @flags: fence flags + * + * Write a fence and a trap command to the ring. + */ +static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + struct amdgpu_device *adev = ring->adev; + + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, seq); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0xffffffff); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x3fbc); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x1); + + /* emit trap */ + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); +} + +/** + * jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer + * + * @ring: amdgpu_ring pointer + * @ib: indirect buffer to execute + * + * Write ring commands to execute the indirect buffer.
+ */ +static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid | (vmid << 4))); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, ib->length_dw); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); + + amdgpu_ring_write(ring, + PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); + amdgpu_ring_write(ring, 0x2); +} + +static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val, + uint32_t mask) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, + PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); + if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || + ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, + PACKETJ(0, 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask); +} + +static void jpeg_v1_0_decode_ring_emit_wreg(struct 
amdgpu_ring *ring,
+					uint32_t reg, uint32_t val)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t reg_offset = (reg << 2);
+
+	amdgpu_ring_write(ring,
+		PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+	/* these ranges can be addressed directly in the packet; everything
+	 * else goes through the external-register window */
+	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring,
+			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+	} else {
+		amdgpu_ring_write(ring, reg_offset);
+		amdgpu_ring_write(ring,
+			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+	}
+	amdgpu_ring_write(ring, val);
+}
+
+static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	/* nops are emitted in pairs, so the ring must stay 2-dword aligned */
+	WARN_ON(ring->wptr % 2 || count % 2);
+
+	for (i = 0; i < count / 2; i++) {
+		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+		amdgpu_ring_write(ring, 0);
+	}
+}
+
+static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: JPEG decode TRAP\n");
+
+	switch (entry->src_id) {
+	case 126: /* JPEG decode trap */
+		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v1_0_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+int jpeg_v1_0_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->jpeg.num_jpeg_inst = 1;
+
+	jpeg_v1_0_set_dec_ring_funcs(adev);
+	jpeg_v1_0_set_irq_funcs(adev);
+
+	return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set up the JPEG trap interrupt and the decode ring
+ */
+int jpeg_v1_0_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int r;
+
+	/* JPEG TRAP */
+	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq);
+	if (r)
+		return r;
+
+	ring = &adev->jpeg.inst->ring_dec;
+	sprintf(ring->name, "jpeg_dec");
+	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+	if (r)
+		return r;
+
+	adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch =
+		SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+
+	return 0;
+}
+
+/**
+ * jpeg_v1_0_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG free up sw allocation
+ */
+void jpeg_v1_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec);
+}
+
+/**
+ * jpeg_v1_0_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ * @mode: 0 to program the ring buffer registers directly, anything else
+ *	  to leave that to the patched ring commands
+ *
+ * Setup and start the JPEG block
+ */
+void jpeg_v1_0_start(struct amdgpu_device *adev, int mode)
+{
+	struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+
+	if (mode == 0) {
+		WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+				UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
+		WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+		WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+		WREG32_SOC15(JPEG, 0,
mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + } + + /* initialize wptr */ + ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR); + + /* copy patch commands to the jpeg ring */ + jpeg_v1_0_decode_ring_set_patch_ring(ring, + (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); +} + +static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .nop = PACKET0(0x81ff, 0), + .support_64bit_ptrs = false, + .no_user_fence = true, + .vmhub = AMDGPU_MMHUB_0, + .extra_dw = 64, + .get_rptr = jpeg_v1_0_decode_ring_get_rptr, + .get_wptr = jpeg_v1_0_decode_ring_get_wptr, + .set_wptr = jpeg_v1_0_decode_ring_set_wptr, + .emit_frame_size = + 6 + 6 + /* hdp invalidate / flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */ + 26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */ + 6, + .emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */ + .emit_ib = jpeg_v1_0_decode_ring_emit_ib, + .emit_fence = jpeg_v1_0_decode_ring_emit_fence, + .emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v1_0_decode_ring_nop, + .insert_start = jpeg_v1_0_decode_ring_insert_start, + .insert_end = jpeg_v1_0_decode_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg, + .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs; + DRM_INFO("JPEG decode is enabled in VM mode\n"); +} + +static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = { + .set = jpeg_v1_0_set_interrupt_state, + .process = jpeg_v1_0_process_interrupt, +}; + +static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h new file mode 100644 index 000000000000..bbf33a6a3972 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V1_0_H__ +#define __JPEG_V1_0_H__ + +int jpeg_v1_0_early_init(void *handle); +int jpeg_v1_0_sw_init(void *handle); +void jpeg_v1_0_sw_fini(void *handle); +void jpeg_v1_0_start(struct amdgpu_device *adev, int mode); + +#endif /*__JPEG_V1_0_H__*/ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c new file mode 100644 index 000000000000..a78292d84854 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -0,0 +1,827 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_jpeg.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" + +#include "vcn/vcn_2_0_0_offset.h" +#include "vcn/vcn_2_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff +#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 +#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a +#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb +#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf +#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 +#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed +#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 +#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 +#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 + +static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); +static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev); +static int jpeg_v2_0_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * jpeg_v2_0_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int jpeg_v2_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->jpeg.num_jpeg_inst = 1; + + jpeg_v2_0_set_dec_ring_funcs(adev); + jpeg_v2_0_set_irq_funcs(adev); + + return 0; 
+} + +/** + * jpeg_v2_0_sw_init - sw init for JPEG block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int jpeg_v2_0_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring; + int r; + + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); + if (r) + return r; + + r = amdgpu_jpeg_sw_init(adev); + if (r) + return r; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + ring = &adev->jpeg.inst->ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; + sprintf(ring->name, "jpeg_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0); + if (r) + return r; + + adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * jpeg_v2_0_sw_fini - sw fini for JPEG block + * + * @handle: amdgpu_device pointer + * + * JPEG suspend and free up sw allocation + */ +static int jpeg_v2_0_sw_fini(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_suspend(adev); + if (r) + return r; + + r = amdgpu_jpeg_sw_fini(adev); + + return r; +} + +/** + * jpeg_v2_0_hw_init - start and test JPEG block + * + * @handle: amdgpu_device pointer + * + */ +static int jpeg_v2_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); + + r = amdgpu_ring_test_helper(ring); + if (!r) + DRM_INFO("JPEG decode initialized successfully.\n"); + + return r; +} + +/** + * jpeg_v2_0_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the JPEG block, mark ring as not ready any more + */ +static int jpeg_v2_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) + jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + + ring->sched.ready = false; + + return 0; +} + +/** + * jpeg_v2_0_suspend - suspend JPEG block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend JPEG block + */ +static int jpeg_v2_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = jpeg_v2_0_hw_fini(adev); + if (r) + return r; + + r = amdgpu_jpeg_suspend(adev); + + return r; +} + +/** + * jpeg_v2_0_resume - resume JPEG block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init JPEG block + */ +static int jpeg_v2_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_jpeg_resume(adev); + if (r) + return r; + + r = jpeg_v2_0_hw_init(adev); + + return r; +} + +static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev) +{ + uint32_t data; + int r = 0; + + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + SOC15_WAIT_ON_RREG(JPEG, 0, + mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); + + if (r) { + 
DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); + return r; + } + } + + /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */ + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + + return 0; +} + +static int jpeg_v2_0_enable_power_gating(struct amdgpu_device* adev) +{ + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { + uint32_t data; + int r = 0; + + data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)); + data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; + data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data); + + data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; + WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data); + + SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS, + (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), + UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); + + if (r) { + DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); + return r; + } + } + + return 0; +} + +static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device* adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JPEG_ENC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + +static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device* adev) +{ + uint32_t data; + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) + data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + + data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data); + + data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE); + data |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JPEG_ENC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data); +} + +/** + * jpeg_v2_0_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v2_0_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec; + int r; + + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_jpeg(adev, true); + + /* disable power gating */ + r = jpeg_v2_0_disable_power_gating(adev); + if (r) + return r; + + /* JPEG disable CGC */ + jpeg_v2_0_disable_clock_gating(adev); + + WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 
+	/* RB_NO_FETCH | RB_RPTR_WR_EN, cf. the named masks in jpeg_v1_0_start() */
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+		lower_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+		upper_32_bits(ring->gpu_addr));
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
+	/* clear RB_NO_FETCH again so the engine starts fetching */
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
+	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v2_0_stop(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* reset JMI */
+	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
+		UVD_JMI_CNTL__SOFT_RESET_MASK,
+		~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+	/* enable JPEG CGC */
+	jpeg_v2_0_enable_clock_gating(adev);
+
+	/* enable power gating */
+	r = jpeg_v2_0_enable_power_gating(adev);
+	if (r)
+		return r;
+
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_jpeg(adev, false);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell)
+		return adev->wb.wb[ring->wptr_offs];
+	else
+		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->use_doorbell) {
+		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+	} else {
+		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+	}
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x68e04);
+
+	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x68e04);
+
+	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x00010000);
+}
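+/*
+ * All ring packets in this file are built with PACKETJ() from soc15d.h,
+ * which packs a register offset plus condition and type fields into a
+ * single dword; the value to write (or compare) follows in the next
+ * dword.  As used here, PACKETJ_TYPE0 is a register write, PACKETJ_TYPE3
+ * a masked register poll and PACKETJ_TYPE7 a trap.
+ */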
+/**
+ * jpeg_v2_0_dec_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address of the fence
+ * @seq: sequence number to write
+ * @flags: fence flags (AMDGPU_FENCE_FLAG_64BIT is not supported)
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				unsigned flags)
+{
+	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, seq);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x8);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+		0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x3fbc);
+
+	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x1);
+
+	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+	amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @job: job the indirect buffer belongs to (supplies the VMID)
+ * @ib: indirect buffer to execute
+ * @flags: unused
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
+				struct amdgpu_job *job,
+				struct amdgpu_ib *ib,
+				uint32_t flags)
+{
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, ib->length_dw);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+	amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+	amdgpu_ring_write(ring, 0);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x01400200);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+		0, 0, PACKETJ_TYPE0));
+	amdgpu_ring_write(ring, 0x2);
+
+	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
+		0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+	amdgpu_ring_write(ring, 0x2);
+}
+
+void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				uint32_t val, uint32_t mask)
+{
+	
uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x01400200); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, val); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE3)); + } + amdgpu_ring_write(ring, mask); +} + +void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; + uint32_t data0, data1, mask; + + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* wait for register write */ + data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; + data1 = lower_32_bits(pd_addr); + mask = 0xffffffff; + jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); +} + +void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) +{ + uint32_t reg_offset = (reg << 2); + + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, + PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); + } else { + amdgpu_ring_write(ring, reg_offset); + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + } + amdgpu_ring_write(ring, val); +} + +void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); + amdgpu_ring_write(ring, 0); + } +} + +static bool jpeg_v2_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); +} + +static int jpeg_v2_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + + SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret); + + return ret; +} + +static int jpeg_v2_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false;
+
+	if (enable) {
+		if (!jpeg_v2_0_is_idle(handle))
+			return -EBUSY;
+		jpeg_v2_0_enable_clock_gating(adev);
+	} else {
+		jpeg_v2_0_disable_clock_gating(adev);
+	}
+
+	return 0;
+}
+
+static int jpeg_v2_0_set_powergating_state(void *handle,
+					enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (state == adev->jpeg.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = jpeg_v2_0_stop(adev);
+	else
+		ret = jpeg_v2_0_start(adev);
+
+	if (!ret)
+		adev->jpeg.cur_state = state;
+
+	return ret;
+}
+
+static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	DRM_DEBUG("IH: JPEG TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_2_0__SRCID__JPEG_DECODE:
+		amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
+	.name = "jpeg_v2_0",
+	.early_init = jpeg_v2_0_early_init,
+	.late_init = NULL,
+	.sw_init = jpeg_v2_0_sw_init,
+	.sw_fini = jpeg_v2_0_sw_fini,
+	.hw_init = jpeg_v2_0_hw_init,
+	.hw_fini = jpeg_v2_0_hw_fini,
+	.suspend = jpeg_v2_0_suspend,
+	.resume = jpeg_v2_0_resume,
+	.is_idle = jpeg_v2_0_is_idle,
+	.wait_for_idle = jpeg_v2_0_wait_for_idle,
+	.check_soft_reset = NULL,
+	.pre_soft_reset = NULL,
+	.soft_reset = NULL,
+	.post_soft_reset = NULL,
+	.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
+	.set_powergating_state = jpeg_v2_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_JPEG,
+	.align_mask = 0xf,
+	.vmhub = AMDGPU_MMHUB_0,
+	.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
+	.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
+	.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+	.emit_frame_size =
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
+		18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
+		8 + 16,
+	.emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
+	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
+	.insert_nop = jpeg_v2_0_dec_ring_nop,
+	.insert_start = jpeg_v2_0_dec_ring_insert_start,
+	.insert_end = jpeg_v2_0_dec_ring_insert_end,
+	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.begin_use = amdgpu_jpeg_ring_begin_use,
+	.end_use = amdgpu_jpeg_ring_end_use,
+	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs;
+	DRM_INFO("JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = {
+	.set = jpeg_v2_0_set_interrupt_state,
+	.process = jpeg_v2_0_process_interrupt,
+};
+
+static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->jpeg.inst->irq.num_types = 1;
+	adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs;
+}
+
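+/*
+ * The IP block version below is what the SoC code registers during early
+ * init so that JPEG takes part in the normal IP init/fini, suspend/resume
+ * and gating flow, e.g. (assuming the usual wiring in the SoC setup file):
+ *
+ *	amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ */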
+const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &jpeg_v2_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h new file mode 100644 index 000000000000..15a344ed340f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __JPEG_V2_0_H__ +#define __JPEG_V2_0_H__ + +void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); +void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); +void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags); +void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, uint32_t flags); +void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); +void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr); +void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); + +extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block; + +#endif /* __JPEG_V2_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c new file mode 100644 index 000000000000..2c58939e6ad0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -0,0 +1,641 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v2_0.h"
+
+#include "vcn/vcn_2_5_offset.h"
+#include "vcn/vcn_2_5_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f
+
+#define JPEG25_MAX_HW_INSTANCES_ARCTURUS			2
+
+static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v2_5_set_powergating_state(void *handle,
+				enum amd_powergating_state state);
+
+static int amdgpu_ih_clientid_jpeg[] = {
+	SOC15_IH_CLIENTID_VCN,
+	SOC15_IH_CLIENTID_VCN1
+};
+
+/**
+ * jpeg_v2_5_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v2_5_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		u32 harvest;
+		int i;
+
+		adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+			harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+				adev->jpeg.harvest_config |= 1 << i;
+		}
+
+		if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+						  AMDGPU_JPEG_HARVEST_JPEG1))
+			return -ENOENT;
+	} else {
+		adev->jpeg.num_jpeg_inst = 1;
+	}
+
+	jpeg_v2_5_set_dec_ring_funcs(adev);
+	jpeg_v2_5_set_irq_funcs(adev);
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v2_5_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	int i, r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		/* JPEG TRAP */
+		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
+			VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
+		if (r)
+			return r;
+	}
+
+	r = amdgpu_jpeg_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
+		ring->use_doorbell = true;
+		/* JPEG shares the VCN doorbell range: slot 1 of the pair, 8 slots per instance */
+		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
+		sprintf(ring->name, "jpeg_dec_%d", i);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+		if (r)
+			return r;
+
+		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v2_5_sw_fini(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_jpeg_suspend(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_sw_fini(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v2_5_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+static int jpeg_v2_5_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int i, r;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
+		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
+
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
+			return r;
+	}
+
+	DRM_INFO("JPEG decode initialized successfully.\n");
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v2_5_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_ring *ring;
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ring = &adev->jpeg.inst[i].ring_dec;
+		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+		    RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+		ring->sched.ready = false;
+	}
+
+	return 0;
+}
+
+/**
+ * jpeg_v2_5_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v2_5_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = jpeg_v2_5_hw_fini(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_jpeg_suspend(adev);
+
+	return r;
+}
+
+/**
+ * jpeg_v2_5_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v2_5_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = amdgpu_jpeg_resume(adev);
+	if (r)
+		return r;
+
+	r = jpeg_v2_5_hw_init(adev);
+
+	return r;
+}
+
+static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+	uint32_t data;
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
+		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+	else
+		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+
+	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
+		| JPEG_CGC_GATE__JPEG2_DEC_MASK
+		| JPEG_CGC_GATE__JPEG_ENC_MASK
+		| JPEG_CGC_GATE__JMCIF_MASK
+		| JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
+	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
+		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
+		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
+		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
+}
+
+static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+	uint32_t data;
+
+	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
+	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
+		|JPEG_CGC_GATE__JPEG2_DEC_MASK
+		|JPEG_CGC_GATE__JPEG_ENC_MASK
+		|JPEG_CGC_GATE__JMCIF_MASK
+		|JPEG_CGC_GATE__JRBBM_MASK);
+	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v2_5_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v2_5_start(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst;
++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = &adev->jpeg.inst[i].ring_dec; + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* JPEG disable CGC */ + jpeg_v2_5_disable_clock_gating(adev, i); + + /* MJPEG global tiling registers */ + WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR); + } + + return 0; +} + +/** + * jpeg_v2_5_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v2_5_stop(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + jpeg_v2_5_enable_clock_gating(adev, i); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + } + + return 0; +} + +/** + * jpeg_v2_5_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR); +} + +/** + * jpeg_v2_5_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR); +} + +/** + * jpeg_v2_5_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } +} + +static bool jpeg_v2_5_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; + + for (i = 0; i < 
adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
+	}
+
+	return ret;
+}
+
+static int jpeg_v2_5_wait_for_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i, ret = 0;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+			UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int jpeg_v2_5_set_clockgating_state(void *handle,
+					enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	int i;
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+
+		if (enable) {
+			if (!jpeg_v2_5_is_idle(handle))
+				return -EBUSY;
+			jpeg_v2_5_enable_clock_gating(adev, i);
+		} else {
+			jpeg_v2_5_disable_clock_gating(adev, i);
+		}
+	}
+
+	return 0;
+}
+
+static int jpeg_v2_5_set_powergating_state(void *handle,
+					enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	if (state == adev->jpeg.cur_state)
+		return 0;
+
+	if (state == AMD_PG_STATE_GATE)
+		ret = jpeg_v2_5_stop(adev);
+	else
+		ret = jpeg_v2_5_start(adev);
+
+	if (!ret)
+		adev->jpeg.cur_state = state;
+
+	return ret;
+}
+
+static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      struct amdgpu_iv_entry *entry)
+{
+	uint32_t ip_instance;
+
+	switch (entry->client_id) {
+	case SOC15_IH_CLIENTID_VCN:
+		ip_instance = 0;
+		break;
+	case SOC15_IH_CLIENTID_VCN1:
+		ip_instance = 1;
+		break;
+	default:
+		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+		return 0;
+	}
+
+	DRM_DEBUG("IH: JPEG TRAP\n");
+
+	switch (entry->src_id) {
+	case VCN_2_0__SRCID__JPEG_DECODE:
+		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
+		break;
+	default:
+		DRM_ERROR("Unhandled interrupt: %d %d\n",
+			  entry->src_id, entry->src_data[0]);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
+	.name = "jpeg_v2_5",
+	.early_init = jpeg_v2_5_early_init,
+	.late_init = NULL,
+	.sw_init = jpeg_v2_5_sw_init,
+	.sw_fini = jpeg_v2_5_sw_fini,
+	.hw_init = jpeg_v2_5_hw_init,
+	.hw_fini = jpeg_v2_5_hw_fini,
+	.suspend = jpeg_v2_5_suspend,
+	.resume = jpeg_v2_5_resume,
+	.is_idle = jpeg_v2_5_is_idle,
+	.wait_for_idle = jpeg_v2_5_wait_for_idle,
+	.check_soft_reset = NULL,
+	.pre_soft_reset = NULL,
+	.soft_reset = NULL,
+	.post_soft_reset = NULL,
+	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
+	.set_powergating_state = jpeg_v2_5_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
+	.type = AMDGPU_RING_TYPE_VCN_JPEG,
+	.align_mask = 0xf,
+	.vmhub = AMDGPU_MMHUB_1,
+	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+	.emit_frame_size =
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
+		18 + 18 + /*
jpeg_v2_5_dec_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */ + .emit_ib = jpeg_v2_0_dec_ring_emit_ib, + .emit_fence = jpeg_v2_0_dec_ring_emit_fence, + .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_jpeg_dec_ring_test_ring, + .test_ib = amdgpu_jpeg_dec_ring_test_ib, + .insert_nop = jpeg_v2_0_dec_ring_nop, + .insert_start = jpeg_v2_0_dec_ring_insert_start, + .insert_end = jpeg_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_jpeg_ring_begin_use, + .end_use = amdgpu_jpeg_ring_end_use, + .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec.me = i; + DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i); + } +} + +static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { + .set = jpeg_v2_5_set_interrupt_state, + .process = jpeg_v2_5_process_interrupt, +}; + +static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + adev->jpeg.inst[i].irq.num_types = 1; + adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + } +} + +const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_JPEG, + .major = 2, + .minor = 5, + .rev = 0, + .funcs = &jpeg_v2_5_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h new file mode 100644 index 000000000000..2b4087c02620 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __JPEG_V2_5_H__ +#define __JPEG_V2_5_H__ + +extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block; + +#endif /* __JPEG_V2_5_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 04cd4b6f95d4..adfd8a6171eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -27,17 +27,13 @@ #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" #include "mmhub/mmhub_1_0_default.h" -#include "mmhub/mmhub_9_4_0_offset.h" #include "vega10_enum.h" - +#include "soc15.h" #include "soc15_common.h" #define mmDAGB0_CNTL_MISC2_RV 0x008f #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0 -#define EA_EDC_CNT_MASK 0x3 -#define EA_EDC_CNT_SHIFT 0x2 - u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) { u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE); @@ -206,6 +202,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp); } @@ -418,6 +416,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = @@ -560,61 +560,194 @@ void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) *flags |= AMD_CG_SUPPORT_MC_LS; } +static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = { + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA0_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + 
SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), + SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + } +}; + +static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0}, +}; + +static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t value, uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) { + if(mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v1_0_ras_fields[i].sec_count_mask) >> + mmhub_v1_0_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("MMHUB SubBlock %s, SEC %d\n", + mmhub_v1_0_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v1_0_ras_fields[i].ded_count_mask) >> + mmhub_v1_0_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("MMHUB SubBlock %s, DED %d\n", + mmhub_v1_0_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - int i; - uint32_t ea0_edc_cnt, ea0_edc_cnt2; - uint32_t ea1_edc_cnt, ea1_edc_cnt2; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - - /* EDC CNT will be cleared automatically after read */ - ea0_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT_VG20); - ea0_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20); - ea1_edc_cnt = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT_VG20); - ea1_edc_cnt2 = RREG32_SOC15(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20); - - /* error count of each error type is recorded by 2 bits, - * ce and ue count in EDC_CNT - */ - for (i = 0; i < 5; i++) { - err_data->ce_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ce_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; - err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; - } - /* successive ue count in EDC_CNT */ - for (i = 0; i < 5; i++) { - err_data->ue_count += (ea0_edc_cnt & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt & EA_EDC_CNT_MASK); - ea0_edc_cnt >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt >>= EA_EDC_CNT_SHIFT; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); + if (reg_value) + mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); } - /* ce and ue count in EDC_CNT2 */ - for (i = 0; i < 3; i++) { - err_data->ce_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ce_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - 
} - /* successive ue count in EDC_CNT2 */ - for (i = 0; i < 6; i++) { - err_data->ue_count += (ea0_edc_cnt2 & EA_EDC_CNT_MASK); - err_data->ue_count += (ea1_edc_cnt2 & EA_EDC_CNT_MASK); - ea0_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - ea1_edc_cnt2 >>= EA_EDC_CNT_SHIFT; - } + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; } const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 3542c203c3c8..a7cb185d639a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -31,20 +31,25 @@ #include "soc15_common.h" -static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev) +void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmMMVM_CONTEXT0_* to mmMMVM_CONTEXT1_* */ + int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - mmhub_v2_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); @@ -137,6 +142,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp); tmp = mmMMVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp); tmp = mmMMVM_L2_CNTL4_DEFAULT; @@ -152,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); } @@ -332,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h index db16f3ece218..3ea4344f0315 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h @@ -31,5 +31,7 @@ void 
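/*
 * A minimal standalone sketch of the table-driven SEC/DED extraction that the
 * mmhub_v1_0 RAS rework above introduces: each soc15_ras_field_entry carries a
 * mask and shift per field, and the count is recovered as
 * (value & mask) >> shift. The field layout below is invented for
 * illustration, not taken from the register database.
 */
#include <stdint.h>
#include <stdio.h>

struct ras_field {
	const char *name;
	uint32_t sec_mask, sec_shift;	/* correctable (SEC) count field */
	uint32_t ded_mask, ded_shift;	/* uncorrectable (DED) count field */
};

/* invented layout: SEC in bits [1:0], DED in bits [3:2] */
static const struct ras_field demo_field = { "DEMO_CMDMEM", 0x3, 0, 0xc, 2 };

int main(void)
{
	uint32_t value = 0x6;	/* SEC = 2, DED = 1 under the layout above */
	uint32_t sec = (value & demo_field.sec_mask) >> demo_field.sec_shift;
	uint32_t ded = (value & demo_field.ded_mask) >> demo_field.ded_shift;

	printf("%s: SEC %u, DED %u\n", demo_field.name, sec, ded);
	return 0;
}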
mmhub_v2_0_init(struct amdgpu_device *adev); int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); +void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 0cf7ef44b4b5..d9301e80522a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -21,6 +21,7 @@ * */ #include "amdgpu.h" +#include "amdgpu_ras.h" #include "mmhub_v9_4.h" #include "mmhub/mmhub_9_4_1_offset.h" @@ -29,7 +30,7 @@ #include "athub/athub_1_0_offset.h" #include "athub/athub_1_0_sh_mask.h" #include "vega10_enum.h" - +#include "soc15.h" #include "soc15_common.h" #define MMHUB_NUM_INSTANCES 2 @@ -53,7 +54,7 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) return base; } -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, +static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid, uint32_t vmid, uint64_t value) { /* two registers distance between mmVML2VC0_VM_CONTEXT0_* to @@ -79,7 +80,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, { uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); - mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base); + mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -100,6 +101,16 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, (u32)(adev->gmc.gart_end >> 44)); } +void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) +{ + int i; + + for (i = 0; i < MMHUB_NUM_INSTANCES; i++) + mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid, + page_table_base); +} + static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, int hubid) { @@ -219,6 +230,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid) hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); @@ -240,6 +260,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev, hubid * MMHUB_INSTANCE_REGISTER_OFFSET); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); } @@ -302,7 +324,8 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) adev->vm_manager.block_size - 9); /* Send no-retry XNACK on fault to suppress VM fault storm. 
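/*
 * mmhub_v2_0_setup_vm_pt_regs and mmhub_v9_4_setup_hubid_vm_pt_regs above both
 * derive a per-VMID register address from the distance between the CONTEXT0
 * and CONTEXT1 registers, so one helper can program any VM context. A
 * standalone sketch of the stride arithmetic; the register indices are
 * invented.
 */
#include <stdint.h>
#include <stdio.h>

#define CTX0_PT_BASE_ADDR_LO	0x100	/* invented dword index */
#define CTX1_PT_BASE_ADDR_LO	0x102	/* next context's copy of the register */

int main(void)
{
	uint32_t offset = CTX1_PT_BASE_ADDR_LO - CTX0_PT_BASE_ADDR_LO;
	uint32_t vmid;

	/* mirrors WREG32_SOC15_OFFSET(..., offset * vmid, ...) */
	for (vmid = 0; vmid < 16; vmid++)
		printf("vmid %2u -> reg 0x%03x\n", vmid,
		       CTX0_PT_BASE_ADDR_LO + offset * vmid);
	return 0;
}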
*/ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, + !amdgpu_noretry); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL, hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i, tmp); @@ -493,6 +516,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_SEM) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; hub[i]->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_INVALIDATE_ENG0_REQ) + @@ -640,3 +667,253 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) *flags |= AMD_CG_SUPPORT_MC_LS; } + +static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { + { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 
0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 
0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + } +}; + +static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0}, +}; + +static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t value, uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) { + if(mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset) + continue; + + sec_cnt = (value & + mmhub_v9_4_ras_fields[i].sec_count_mask) >> + mmhub_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("MMHUB SubBlock %s, SEC %d\n", + mmhub_v9_4_ras_fields[i].name, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + mmhub_v9_4_ras_fields[i].ded_count_mask) >> + mmhub_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("MMHUB SubBlock %s, DED %d\n", + mmhub_v9_4_ras_fields[i].name, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i; + uint32_t reg_value; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) { + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + if (reg_value) + mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i], + reg_value, &sec_count, &ded_count); + } + + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; +} + +const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, + .query_ras_error_count = mmhub_v9_4_query_ras_error_count, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h index d435cfcec1a8..1b979773776c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ -23,6 +23,8 @@ #ifndef __MMHUB_V9_4_H__ #define __MMHUB_V9_4_H__ +extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs; + u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev); int mmhub_v9_4_gart_enable(struct amdgpu_device *adev); void mmhub_v9_4_gart_disable(struct amdgpu_device *adev); @@ -32,5 +34,7 @@ 
void mmhub_v9_4_init(struct amdgpu_device *adev); int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags); +void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index cc5bf595f9b1..43305afa3d6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_ai_mailbox_set_valid(adev, false); } -static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) -{ - int r = 0; - u32 req, val, size; - - if (!amdgim_is_hwperf(adev) || buf == NULL) - return -EBADRQC; - - switch(type) { - case PP_SCLK: - req = IDH_IRQ_GET_PP_SCLK; - break; - case PP_MCLK: - req = IDH_IRQ_GET_PP_MCLK; - break; - default: - return -EBADRQC; - } - - mutex_lock(&adev->virt.dpm_mutex); - - xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); - - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); - if (!r && adev->fw_vram_usage.va != NULL) { - val = RREG32_NO_KIQ( - SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); - size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + - val), PAGE_SIZE); - - if (size < PAGE_SIZE) - strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); - else - size = 0; - - r = size; - goto out; - } - - r = xgpu_ai_poll_msg(adev, IDH_FAIL); - if(r) - pr_info("%s DPM request failed", - (type == PP_SCLK)? "SCLK" : "MCLK"); - -out: - mutex_unlock(&adev->virt.dpm_mutex); - return r; -} - -static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) -{ - int r = 0; - u32 req = IDH_IRQ_FORCE_DPM_LEVEL; - - if (!amdgim_is_hwperf(adev)) - return -EBADRQC; - - mutex_lock(&adev->virt.dpm_mutex); - xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); - - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); - if (!r) - goto out; - - r = xgpu_ai_poll_msg(adev, IDH_FAIL); - if (!r) - pr_info("DPM request failed"); - else - pr_info("Mailbox is broken"); - -out: - mutex_unlock(&adev->virt.dpm_mutex); - return r; -} - static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { @@ -455,6 +379,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .reset_gpu = xgpu_ai_request_reset, .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, - .get_pp_clk = xgpu_ai_get_pp_clk, - .force_dpm_level = xgpu_ai_force_dpm_level, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 077e91a33d62..37dbe0f2142f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -35,10 +35,6 @@ enum idh_request { IDH_REL_GPU_FINI_ACCESS, IDH_REQ_GPU_RESET_ACCESS, - IDH_IRQ_FORCE_DPM_LEVEL = 10, - IDH_IRQ_GET_PP_SCLK, - IDH_IRQ_GET_PP_MCLK, - IDH_LOG_VF_ERROR = 200, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c new file mode 100644 index 000000000000..0d8767eb7a70 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -0,0 +1,380 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "nbio/nbio_2_3_offset.h" +#include "nbio/nbio_2_3_sh_mask.h" +#include "gc/gc_10_1_0_offset.h" +#include "gc/gc_10_1_0_sh_mask.h" +#include "soc15.h" +#include "navi10_ih.h" +#include "soc15_common.h" +#include "mxgpu_nv.h" +#include "mxgpu_ai.h" + +static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) +{ + WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2); +} + +static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val) +{ + WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0); +} + +/* + * this peek_msg can *only* be called in the IRQ routine, because in the IRQ + * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL must already be set + * to 1 by the host. + * + * if called outside the IRQ routine, this peek_msg is not guaranteed to return the + * correct value, since it doesn't return the RCV_DW0 under the case that + * RCV_MSG_VALID is set by the host. 
+ */ +static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) +{ + return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); +} + + +static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, + enum idh_event event) +{ + u32 reg; + + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + if (reg != event) + return -ENOENT; + + xgpu_nv_mailbox_send_ack(adev); + + return 0; +} + +static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) +{ + return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2; +} + +static int xgpu_nv_poll_ack(struct amdgpu_device *adev) +{ + int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT; + u8 reg; + + do { + reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE); + if (reg & 2) + return 0; + + mdelay(5); + timeout -= 5; + } while (timeout > 1); + + pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT); + + return -ETIME; +} + +static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) +{ + int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT; + + do { + r = xgpu_nv_mailbox_rcv_msg(adev, event); + if (!r) + return 0; + + msleep(10); + timeout -= 10; + } while (timeout > 1); + + pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + + return -ETIME; +} + +static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) +{ + u32 reg; + int r; + uint8_t trn; + + /* IMPORTANT: + * clear TRN_MSG_VALID to clear host's RCV_MSG_ACK; once host's + * RCV_MSG_ACK is cleared, hw automatically clears VF's TRN_MSG_ACK. + * Otherwise the xgpu_nv_poll_ack() below would return + * immediately + */ + do { + xgpu_nv_mailbox_set_valid(adev, false); + trn = xgpu_nv_peek_ack(adev); + if (trn) { + pr_err("trn=%x ACK should not assert! 
wait again!\n", trn); + msleep(1); + } + } while (trn); + + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0)); + reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, req); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0), + reg); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1), + data1); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2), + data2); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3), + data3); + + xgpu_nv_mailbox_set_valid(adev, true); + + /* start to poll ack */ + r = xgpu_nv_poll_ack(adev); + if (r) + pr_err("Doesn't get ack from pf, continue\n"); + + xgpu_nv_mailbox_set_valid(adev, false); +} + +static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + int r; + + xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); + + /* start to check msg if request is idh_req_gpu_init_access */ + if (req == IDH_REQ_GPU_INIT_ACCESS || + req == IDH_REQ_GPU_FINI_ACCESS || + req == IDH_REQ_GPU_RESET_ACCESS) { + r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + if (r) { + pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); + return r; + } + /* Retrieve checksum from mailbox2 */ + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { + adev->virt.fw_reserve.checksum_key = + RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2)); + } + } + + return 0; +} + +static int xgpu_nv_request_reset(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); +} + +static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + + req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS; + return xgpu_nv_send_access_requests(adev, req); +} + +static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + int r = 0; + + req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS; + r = xgpu_nv_send_access_requests(adev, req); + + return r; +} + +static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("get ack intr and do nothing.\n"); + return 0; +} + +static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static void xgpu_nv_mailbox_flr_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; + int locked; + + /* block amdgpu_gpu_recover till msg FLR COMPLETE is received, + * otherwise the mailbox msg will be ruined/reset by + * the VF FLR. + * + * we can unlock the lock_reset to allow "amdgpu_job_timedout" + * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received, + * which means the host side has finished this VF's FLR. 
+ */ + locked = mutex_trylock(&adev->lock_reset); + if (locked) + adev->in_gpu_reset = 1; + + do { + if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) + goto flr_done; + + msleep(10); + timeout -= 10; + } while (timeout > 1); + +flr_done: + if (locked) { + adev->in_gpu_reset = 0; + mutex_unlock(&adev->lock_reset); + } + + /* Trigger recovery for world switch failure if no TDR */ + if (amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); +} + +static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + enum idh_event event = xgpu_nv_mailbox_peek_msg(adev); + + switch (event) { + case IDH_FLR_NOTIFICATION: + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.flr_work); + break; + /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can ignore + * it here since that polling thread will handle it; + * other msgs like flr complete are not handled here. + */ + case IDH_CLR_MSG_BUF: + case IDH_FLR_NOTIFICATION_CMPL: + case IDH_READY_TO_ACCESS_GPU: + default: + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = { + .set = xgpu_nv_set_mailbox_ack_irq, + .process = xgpu_nv_mailbox_ack_irq, +}; + +static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = { + .set = xgpu_nv_set_mailbox_rcv_irq, + .process = xgpu_nv_mailbox_rcv_irq, +}; + +void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->virt.ack_irq.num_types = 1; + adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs; + adev->virt.rcv_irq.num_types = 1; + adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs; +} + +int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + return 0; +} + +int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work); + + return 0; +} + +void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) +{ + amdgpu_irq_put(adev, &adev->virt.ack_irq, 0); + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); +} + +const struct amdgpu_virt_ops xgpu_nv_virt_ops = { + .req_full_gpu = xgpu_nv_request_full_gpu_access, + .rel_full_gpu = xgpu_nv_release_full_gpu_access, + .reset_gpu = xgpu_nv_request_reset, + .wait_reset = NULL, + .trans_msg = xgpu_nv_mailbox_trans_msg, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h index 19e54acb4125..99b15f6865cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -21,33 +21,21 @@ * 
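/*
 * xgpu_nv_poll_ack and xgpu_nv_poll_msg above share one pattern: check, sleep
 * a fixed step, and decrement a millisecond budget until it is nearly spent,
 * then fail with -ETIME. A host-side sketch of that loop with a stand-in
 * predicate in place of the mailbox register read.
 */
#include <stdbool.h>
#include <stdio.h>

static bool condition_met(void)
{
	return true;	/* stand-in; the driver reads a mailbox register here */
}

static int poll_with_timeout(int timeout_ms, int step_ms)
{
	do {
		if (condition_met())
			return 0;
		/* msleep(step_ms) in the kernel; omitted in this sketch */
		timeout_ms -= step_ms;
	} while (timeout_ms > 1);

	return -1;	/* -ETIME in the driver */
}

int main(void)
{
	printf("poll result: %d\n", poll_with_timeout(12000, 10));
	return 0;
}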
*/ -#include "kfd_kernel_queue.h" +#ifndef __MXGPU_NV_H__ +#define __MXGPU_NV_H__ -static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_cik(struct kernel_queue *kq); -static void submit_packet_cik(struct kernel_queue *kq); +#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 +#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 -void kernel_queue_init_cik(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_cik; - ops->uninitialize = uninitialize_cik; - ops->submit_packet = submit_packet_cik; -} +extern const struct amdgpu_virt_ops xgpu_nv_virt_ops; -static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - return true; -} +void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev); +int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev); +int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev); +void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev); -static void uninitialize_cik(struct kernel_queue *kq) -{ -} +#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4) +#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1) -static void submit_packet_cik(struct kernel_queue *kq) -{ - *kq->wptr_kernel = kq->pending_wptr; - write_kernel_doorbell(kq->queue->properties.doorbell_ptr, - kq->pending_wptr); -} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 9fe08408db58..9af73567e716 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) /* disable irqs */ navi10_ih_disable_interrupts(adev); - adev->nbio_funcs->ih_control(adev); + adev->nbio.funcs->ih_control(adev); /* Ring Buffer base. 
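/*
 * The NV_MAIBOX_CONTROL_*_OFFSET_BYTE macros above turn the dword index of
 * mmBIF_BX_PF_MAILBOX_CONTROL into byte addresses: the TRN control byte sits
 * at byte 0 of the dword and the RCV control byte at byte 1, which is why
 * send_ack and set_valid use WREG8. A sketch of the arithmetic with an
 * invented register index.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mailbox_control = 0x45;	/* invented dword register index */
	uint32_t trn_byte = mailbox_control * 4;	/* byte 0 of the dword */
	uint32_t rcv_byte = mailbox_control * 4 + 1;	/* byte 1 of the dword */

	printf("TRN control byte 0x%x, RCV control byte 0x%x\n",
	       trn_byte, rcv_byte);
	return 0;
}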
[39:8] of 40-bit address of the beginning of the ring buffer*/ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); @@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) } WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); - adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell, + adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, ih->doorbell_index); tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c index a56c93620e78..88efaecf9f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi10_ip_offset.h" int navi10_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c index cadc7603ca41..a786d159e5e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi12_ip_offset.h" int navi12_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c index 3b5f0f65e096..4ea1e8fbb601 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi14_ip_offset.h" int navi14_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index c05d78d4efc6..f3a3fe746222 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -27,11 +27,21 @@ #include "nbio/nbio_2_3_default.h" #include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> #define smnPCIE_CONFIG_CNTL 0x11180044 #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 + +static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} + static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); @@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0); + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev) @@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { - .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = 
nbio_v2_3_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset, @@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .ih_control = nbio_v2_3_ih_control, .init_registers = nbio_v2_3_init_registers, .detect_hw_virt = nbio_v2_3_detect_hw_virt, + .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h index 5ae52085f6b7..a43b60acf7f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 6590143c3f75..635d9e1fc0a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } -static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { +const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, @@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { - .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h index 0743a6f016f3..6dc743b73218 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 74eecb768a82..d6cbf26074bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { - .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h index 508d549c5029..e7aefb252550 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 910fffced43b..bb701dbfd472 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -23,10 +23,12 @@ #include "amdgpu.h" #include "amdgpu_atombios.h" #include "nbio_v7_4.h" +#include "amdgpu_ras.h" #include "nbio/nbio_7_4_offset.h" #include 
"nbio/nbio_7_4_sh_mask.h" #include "nbio/nbio_7_4_0_smn.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include <uapi/linux/kfd_ioctl.h> #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c @@ -50,6 +52,9 @@ #define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL #define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); + static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -266,7 +271,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } -static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { +const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, @@ -306,17 +311,228 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - uint32_t def, data; - def = data = RREG32_PCIE(smnPCIE_CI_CNTL); - data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); +} - if (def != data) - WREG32_PCIE(smnPCIE_CI_CNTL, data); +static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_intr_cntl; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if); + + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_CNTLR_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + /* + * clear error status after ras_controller_intr according to + * hw team and count ue number for query + */ + nbio_v7_4_query_ras_error_count(adev, &obj->err_data); + + DRM_WARN("RAS controller interrupt triggered by NBIF error\n"); + + /* ras_controller_int is dedicated for nbif ras error, + * not the global interrupt for sync flood + */ + amdgpu_ras_reset_gpu(adev, true); + } +} + +static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_intr_cntl; + + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + amdgpu_ras_global_ras_isr(adev); + } +} + + +static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_controller_irq enablement should be done in psp bl when it + * tries to enable ras feature. 
The driver only needs to set the correct interrupt + * vector for the bare-metal and sriov use cases respectively + */ + uint32_t bif_intr_cntl; + + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { + /* set interrupt vector select bit to 0 to select + * vector 1 for bare metal case */ + bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, + BIF_INTR_CNTL, + RAS_INTR_VEC_SEL, 0); + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } + + return 0; +} + +static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for ras_controller_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be invoked. Just leave it as a dummy one. + */ + return 0; +} + +static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_controller_irq enablement should be done in psp bl when it + * tries to enable the ras feature. The driver only needs to set the correct interrupt + * vector for the bare-metal and sriov use cases respectively + */ + uint32_t bif_intr_cntl; + + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { + /* set interrupt vector select bit to 0 to select + * vector 1 for bare metal case */ + bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, + BIF_INTR_CNTL, + RAS_INTR_VEC_SEL, 0); + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } + + return 0; +} + +static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be invoked. Just leave it as a dummy one. 
+ */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = { + .set = nbio_v7_4_set_ras_controller_irq_state, + .process = nbio_v7_4_process_ras_controller_irq, +}; + +static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = { + .set = nbio_v7_4_set_ras_err_event_athub_irq_state, + .process = nbio_v7_4_process_err_event_athub_irq, +}; + +static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev) +{ + int r; + + /* init the irq funcs */ + adev->nbio.ras_controller_irq.funcs = + &nbio_v7_4_ras_controller_irq_funcs; + adev->nbio.ras_controller_irq.num_types = 1; + + /* register ras controller interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, + &adev->nbio.ras_controller_irq); + if (r) + return r; + + return 0; +} + +static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev) +{ + + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v7_4_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 1; + + /* register ras err event athub interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + if (r) + return r; + + return 0; +} + +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 + +static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + uint32_t global_sts, central_sts, int_eoi, parity_sts; + uint32_t corr, fatal, non_fatal; + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr); + fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); + non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, + ParityErrNonFatal); + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); + + if (corr) + err_data->ce_count++; + if (fatal) + err_data->ue_count++; + + if (corr || fatal || non_fatal) { + central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS); + /* clear error status register */ + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + + if (fatal) + /* clear parity fatal error indication field */ + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, + parity_sts); + + if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, + BIFL_RasContller_Intr_Recv)) { + /* clear interrupt status register */ + WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts); + int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI); + int_eoi = REG_SET_FIELD(int_eoi, + IOHC_INTERRUPT_EOI, SMI_EOI, 1); + WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi); + } + } +} + +static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, enable ? 
0 : 1); } const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { - .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset, @@ -330,6 +546,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture, .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture, .ih_doorbell_range = nbio_v7_4_ih_doorbell_range, + .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt, .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating, .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep, .get_clockgating_state = nbio_v7_4_get_clockgating_state, @@ -337,4 +554,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .init_registers = nbio_v7_4_init_registers, .detect_hw_virt = nbio_v7_4_detect_hw_virt, .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, + .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, + .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt, + .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt, + .query_ras_error_count = nbio_v7_4_query_ras_error_count, + .ras_late_init = amdgpu_nbio_ras_late_init, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index c442865bac4f..b1ac82872752 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index de9b995b65b1..b0229543e887 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -40,19 +40,23 @@ #include "gc/gc_10_1_0_sh_mask.h" #include "hdp/hdp_5_0_0_offset.h" #include "hdp/hdp_5_0_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" #include "soc15.h" #include "soc15_common.h" #include "gmc_v10_0.h" #include "gfxhub_v2_0.h" #include "mmhub_v2_0.h" +#include "nbio_v2_3.h" #include "nv.h" #include "navi10_ih.h" #include "gfx_v10_0.h" #include "sdma_v5_0.h" #include "vcn_v2_0.h" +#include "jpeg_v2_0.h" #include "dce_virtual.h" #include "mes_v10_1.h" +#include "mxgpu_nv.h" static const struct amd_ip_funcs nv_common_ip_funcs; @@ -63,8 +67,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u32 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -78,8 +82,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -119,7 +123,7 @@ static void 
nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static u32 nv_get_config_memsize(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_memsize(adev); + return adev->nbio.funcs->get_memsize(adev); } static u32 nv_get_xclk(struct amdgpu_device *adev) @@ -154,8 +158,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev) static bool nv_read_bios_from_rom(struct amdgpu_device *adev, u8 *bios, u32 length_bytes) { - /* TODO: will implement it when SMU header is available */ - return false; + u32 *dw_ptr; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + + /* set rom index to 0 */ + WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + /* read out the rom data */ + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + + return true; } static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { @@ -176,6 +199,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, @@ -279,7 +303,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = adev->nbio_funcs->get_memsize(adev); + u32 memsize = adev->nbio.funcs->get_memsize(adev); if (memsize != 0xffffffff) break; @@ -291,12 +315,22 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) return ret; } +static bool nv_asic_supports_baco(struct amdgpu_device *adev) +{ + struct smu_context *smu = &adev->smu; + + if (smu_baco_is_support(smu)) + return true; + else + return false; +} + static enum amd_reset_method nv_asic_reset_method(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; - if (smu_baco_is_support(smu)) + if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu)) return AMD_RESET_METHOD_BACO; else return AMD_RESET_METHOD_MODE1; @@ -319,7 +353,12 @@ static int nv_asic_reset(struct amdgpu_device *adev) if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); - ret = smu_baco_reset(smu); + ret = smu_baco_enter(smu); + if (ret) + return ret; + ret = smu_baco_exit(smu); + if (ret) + return ret; } else { if (!adev->in_suspend) amdgpu_inc_vram_lost(adev); @@ -368,8 +407,8 @@ static void nv_program_aspm(struct amdgpu_device *adev) static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } static const struct amdgpu_ip_block_version nv_common_ip_block = @@ -423,9 +462,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; - adev->nbio_funcs = &nbio_v2_3_funcs; + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - adev->nbio_funcs->detect_hw_virt(adev); + adev->nbio.funcs->detect_hw_virt(adev); + + if 
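/*
 * nv_read_bios_from_rom above is a classic index/data access: write 0 to
 * mmROM_INDEX once, then read mmROM_DATA in a loop while the hardware
 * auto-increments the index; the byte length is rounded up to whole dwords
 * via ALIGN(length_bytes, 4) / 4. A user-space sketch of the same pattern
 * over a fake ROM, with the auto-increment modeled explicitly.
 */
#include <stdint.h>
#include <stdio.h>

static const uint32_t fake_rom[] = { 0xaa55aa55, 0x11111111, 0x22222222, 0x33333333 };
static uint32_t rom_index;

static void wreg_rom_index(uint32_t v) { rom_index = v; }
static uint32_t rreg_rom_data(void) { return fake_rom[rom_index++]; }

static void read_bios(uint32_t *dw_ptr, uint32_t length_bytes)
{
	uint32_t i, length_dw = (length_bytes + 3) / 4;	/* ALIGN(len, 4) / 4 */

	wreg_rom_index(0);			/* set rom index to 0 once */
	for (i = 0; i < length_dw; i++)		/* index auto-increments */
		dw_ptr[i] = rreg_rom_data();
}

int main(void)
{
	uint32_t buf[4];

	read_bios(buf, sizeof(buf));
	printf("first dword: 0x%08x\n", buf[0]);
	return 0;
}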
(amdgpu_sriov_vf(adev)) + adev->virt.ops = &xgpu_nv_virt_ops; switch (adev->asic_type) { case CHIP_NAVI10: @@ -435,7 +478,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -446,9 +489,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); if (adev->enable_mes) amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); break; @@ -458,7 +502,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -469,9 +513,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL; } return 0; } static uint32_t nv_get_rev_id(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_rev_id(adev); + return adev->nbio.funcs->get_rev_id(adev); } static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - adev->nbio_funcs->hdp_flush(adev, ring); + adev->nbio.funcs->hdp_flush(adev, ring); } static void nv_invalidate_hdp(struct amdgpu_device *adev, @@ -532,6 +577,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev) return false; } +static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev) +{ + + /* TODO + * dummy implementation for the pcie_replay_count sysfs interface + */ + + return 0; +} + static void nv_init_doorbell_index(struct amdgpu_device *adev) { adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; @@ -579,12 +634,17 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .need_full_reset = &nv_need_full_reset, .get_pcie_usage = &nv_get_pcie_usage, .need_reset_on_init = &nv_need_reset_on_init, + .get_pcie_replay_count = &nv_get_pcie_replay_count, + .supports_baco = &nv_asic_supports_baco, }; static int nv_common_early_init(void *handle) { +#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
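/*
 * Editorial sketch (illustrative, not part of the patch): the "register
 * hole" set up above is the page just below offset 0x80000 of the MMIO
 * BAR, so with 4 KiB pages:
 *
 *   reg_offset = 0x80000 - 0x1000 = 0x7f000
 *   bus_addr   = <BAR0 base>      + 0x7f000
 *
 * nv_common_hw_init() below asks the NBIO block to remap the HDP flush
 * registers into this page (remap_hdp_registers), so a process can map
 * that single page and flush HDP without a round trip through the kernel.
 */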
adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -615,10 +675,12 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; @@ -635,9 +697,11 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 20; break; @@ -656,9 +720,11 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_ATHUB_LS | - AMD_CG_SUPPORT_VCN_MGCG; + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0xa; break; @@ -667,16 +733,31 @@ static int nv_common_early_init(void *handle) return -EINVAL; } + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_setting(adev); + xgpu_nv_mailbox_set_irq_funcs(adev); + } + return 0; } static int nv_common_late_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_nv_mailbox_get_irq(adev); + return 0; } static int nv_common_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_nv_mailbox_add_irq_id(adev); + return 0; } @@ -694,7 +775,13 @@ static int nv_common_hw_init(void *handle) /* enable aspm */ nv_program_aspm(adev); /* setup nbio registers */ - adev->nbio_funcs->init_registers(adev); + adev->nbio.funcs->init_registers(adev); + /* remap HDP registers to a hole in mmio space, + * so those registers can be exposed + * to process space + */ + if (adev->nbio.funcs->remap_hdp_registers) + adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); @@ -856,9 +943,9 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); nv_update_hdp_mem_power_gating(adev, state == AMD_CG_STATE_GATE ?
true : false); @@ -886,7 +973,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio_funcs->get_clockgating_state(adev, flags); + adev->nbio.funcs->get_clockgating_state(adev, flags); /* AMD_CG_SUPPORT_HDP_MGCG */ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 5d95e614369a..7539104175e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -40,6 +40,9 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin"); MODULE_FIRMWARE("amdgpu/picasso_asd.bin"); MODULE_FIRMWARE("amdgpu/raven2_asd.bin"); +MODULE_FIRMWARE("amdgpu/picasso_ta.bin"); +MODULE_FIRMWARE("amdgpu/raven2_ta.bin"); +MODULE_FIRMWARE("amdgpu/raven_ta.bin"); static int psp_v10_0_init_microcode(struct psp_context *psp) { @@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) char fw_name[30]; int err = 0; const struct psp_firmware_header_v1_0 *hdr; - + const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); switch (adev->asic_type) { @@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) adev->psp.asd_start_addr = (uint8_t *)hdr + le32_to_cpu(hdr->header.ucode_array_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v10.0: Failed to load firmware \"%s\"\n", + fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *) + adev->psp.ta_fw->data; + adev->psp.ta_hdcp_ucode_version = + le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); + adev->psp.ta_hdcp_ucode_size = + le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); + adev->psp.ta_hdcp_start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.ta_dtm_ucode_version = + le32_to_cpu(ta_hdr->ta_dtm_ucode_version); + adev->psp.ta_dtm_ucode_size = + le32_to_cpu(ta_hdr->ta_dtm_size_bytes); + adev->psp.ta_dtm_start_addr = + (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + } + return 0; + +out2: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; out: if (err) { dev_err(adev->dev, @@ -189,53 +230,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v10_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid 
write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v10_0_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -365,15 +359,30 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp) return -EINVAL; } +static uint32_t psp_v10_0_ring_get_wptr(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + + return RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); +} + +static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v10_0_funcs = { .init_microcode = psp_v10_0_init_microcode, .ring_init = psp_v10_0_ring_init, .ring_create = psp_v10_0_ring_create, .ring_stop = psp_v10_0_ring_stop, .ring_destroy = psp_v10_0_ring_destroy, - .cmd_submit = psp_v10_0_cmd_submit, .compare_sram_data = psp_v10_0_compare_sram_data, .mode1_reset = psp_v10_0_mode1_reset, + .ring_get_wptr = psp_v10_0_ring_get_wptr, + .ring_set_wptr = psp_v10_0_ring_set_wptr, }; void psp_v10_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 10166104b8a3..c66ca8cc2ebd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); #define mmRLC_GPM_UCODE_DATA_NV10 0x5b62 #define mmSDMA0_UCODE_ADDR_NV10 0x5880 #define mmSDMA0_UCODE_DATA_NV10 0x5881 +/* memory training timeout define */ +#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 static int psp_v11_0_init_microcode(struct psp_context *psp) { @@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) switch (adev->asic_type) { case CHIP_VEGA20: + case CHIP_ARCTURUS: snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { @@ -182,7 +186,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: - case CHIP_ARCTURUS: + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp 
v11.0: Failed to load firmware \"%s\"\n", fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); + adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); + adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version); + adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes); + adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + } break; default: BUG(); @@ -205,18 +233,26 @@ out: return err; } +static bool psp_v11_0_is_sos_alive(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + return sol_reg != 0x0; +} + static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) { int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check tOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { + if (psp_v11_0_is_sos_alive(psp)) { psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; @@ -233,7 +269,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) /* Copy PSP KDB binary to memory */ memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size); - /* Provide the sys driver to bootloader */ + /* Provide the PSP KDB to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE; @@ -252,13 +288,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { + if (psp_v11_0_is_sos_alive(psp)) { psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; @@ -296,13 +330,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
*/ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) + if (psp_v11_0_is_sos_alive(psp)) return 0; /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ @@ -398,6 +430,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) return false; } +static int psp_v11_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + /* Write the ring destroy command*/ + if (psp_v11_0_support_vmr_ring(psp)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) */ + if (psp_v11_0_support_vmr_ring(psp)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + return ret; +} + static int psp_v11_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -407,6 +467,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; if (psp_v11_0_support_vmr_ring(psp)) { + ret = psp_v11_0_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -426,6 +492,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp, 0x80000000, 0x8000FFFF, false); } else { + /* Wait for sOS ready for ring creation */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + if (ret) { + DRM_ERROR("Failed to wait for sOS ready for ring creation\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_69 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); @@ -451,33 +525,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp, return ret; } -static int psp_v11_0_ring_stop(struct psp_context *psp, - enum psp_ring_type ring_type) -{ - int ret = 0; - struct amdgpu_device *adev = psp->adev; - - /* Write the ring destroy command*/ - if (psp_v11_0_support_vmr_ring(psp)) - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); - else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, - GFX_CTRL_CMD_ID_DESTROY_RINGS); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) */ - if (psp_v11_0_support_vmr_ring(psp)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); - else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); - - return ret; -} static int psp_v11_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) @@ -497,62 +544,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v11_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct 
psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v11_0_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v11_0_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v11_0_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -889,6 +880,186 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp) return psp_rlc_autoload_start(psp); } +static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) +{ + int ret; + int i; + uint32_t data_32; + int max_wait; + struct amdgpu_device *adev = psp->adev; + + data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg); + + max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; + for (i = 0; i < max_wait; i++) { + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret == 0) + break; + } + if (i < max_wait) + ret = 0; + else + ret = -ETIME; + + DRM_DEBUG("training %s %s, cost %d @ %d ms\n", + (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long", + (ret == 0) ? 
"succeed" : "failed", + i, adev->usec_timeout/1000); + return ret; +} + +static void psp_v11_0_memory_training_fini(struct psp_context *psp) +{ + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + kfree(ctx->sys_cache); + ctx->sys_cache = NULL; +} + +static int psp_v11_0_memory_training_init(struct psp_context *psp) +{ + int ret; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { + DRM_DEBUG("memory training is not supported!\n"); + return 0; + } + + ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); + if (ctx->sys_cache == NULL) { + DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); + ret = -ENOMEM; + goto Err_out; + } + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); + ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; + return 0; + +Err_out: + psp_v11_0_memory_training_fini(psp); + return ret; +} + +/* + * save and restore proces + */ +static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) +{ + int ret; + uint32_t p2c_header[4]; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + uint32_t *pcache = (uint32_t*)ctx->sys_cache; + + if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { + DRM_DEBUG("Memory training is not supported.\n"); + return 0; + } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) { + DRM_ERROR("Memory training initialization failure.\n"); + return -EINVAL; + } + + if (psp_v11_0_is_sos_alive(psp)) { + DRM_DEBUG("SOS is alive, skip memory training.\n"); + return 0; + } + + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", + pcache[0], pcache[1], pcache[2], pcache[3], + p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + DRM_DEBUG("Short training depends on restore.\n"); + ops |= PSP_MEM_TRAIN_RESTORE; + } + + if ((ops & PSP_MEM_TRAIN_RESTORE) && + pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + pcache[3] == p2c_header[3])) { + DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if ((ops & PSP_MEM_TRAIN_SAVE) && + p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n"); + ops |= PSP_MEM_TRAIN_SEND_LONG_MSG; + } + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG; + ops |= PSP_MEM_TRAIN_SAVE; + } + + DRM_DEBUG("Memory training ops:%x.\n", ops); + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); + if (ret) { + DRM_ERROR("Send long training msg failed.\n"); + return ret; + } + } + + if (ops & PSP_MEM_TRAIN_SAVE) { + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false); + } + + if (ops & PSP_MEM_TRAIN_RESTORE) { + amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true); + } + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, 
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ? + PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN); + if (ret) { + DRM_ERROR("send training msg failed.\n"); + return ret; + } + } + ctx->training_cnt++; + return 0; +} + +static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (psp_v11_0_support_vmr_ring(psp)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (psp_v11_0_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -898,7 +1069,6 @@ static const struct psp_funcs psp_v11_0_funcs = { .ring_create = psp_v11_0_ring_create, .ring_stop = psp_v11_0_ring_stop, .ring_destroy = psp_v11_0_ring_destroy, - .cmd_submit = psp_v11_0_cmd_submit, .compare_sram_data = psp_v11_0_compare_sram_data, .mode1_reset = psp_v11_0_mode1_reset, .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, @@ -909,6 +1079,11 @@ static const struct psp_funcs psp_v11_0_funcs = { .ras_trigger_error = psp_v11_0_ras_trigger_error, .ras_cure_posion = psp_v11_0_ras_cure_posion, .rlc_autoload_start = psp_v11_0_rlc_autoload_start, + .mem_training_init = psp_v11_0_memory_training_init, + .mem_training_fini = psp_v11_0_memory_training_fini, + .mem_training = psp_v11_0_memory_training, + .ring_get_wptr = psp_v11_0_ring_get_wptr, + .ring_set_wptr = psp_v11_0_ring_set_wptr, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index c72e43f8e0be..58d8b6d732e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -334,62 +334,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v12_0_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v12_0_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { -
DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v12_0_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v12_0_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -546,6 +490,30 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) return 0; } +static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (psp_v12_0_support_vmr_ring(psp)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + return data; +} + +static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (psp_v12_0_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v12_0_funcs = { .init_microcode = psp_v12_0_init_microcode, .bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv, @@ -554,9 +522,10 @@ static const struct psp_funcs psp_v12_0_funcs = { .ring_create = psp_v12_0_ring_create, .ring_stop = psp_v12_0_ring_stop, .ring_destroy = psp_v12_0_ring_destroy, - .cmd_submit = psp_v12_0_cmd_submit, .compare_sram_data = psp_v12_0_compare_sram_data, .mode1_reset = psp_v12_0_mode1_reset, + .ring_get_wptr = psp_v12_0_ring_get_wptr, + .ring_set_wptr = psp_v12_0_ring_set_wptr, }; void psp_v12_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index d2c727f6a8bd..735c43c7daab 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -179,7 +179,7 @@ static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver) * Double check if the latest four legacy versions. * If yes, it is still the right version. 
*/ - for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) { + for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) { if (sos_old_versions[i] == adev->psp.sos_fw_version) return true; } @@ -410,64 +410,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp, return ret; } -static int psp_v3_1_cmd_submit(struct psp_context *psp, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, - int index) -{ - unsigned int psp_write_ptr_reg = 0; - struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem; - struct psp_ring *ring = &psp->km_ring; - struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; - struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + - ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; - struct amdgpu_device *adev = psp->adev; - uint32_t ring_size_dw = ring->ring_size / 4; - uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; - - /* KM (GPCOM) prepare write pointer */ - if (psp_v3_1_support_vmr_ring(psp)) - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); - else - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); - - /* Update KM RB frame pointer to new frame */ - /* write_frame ptr increments by size of rb_frame in bytes */ - /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ - if ((psp_write_ptr_reg % ring_size_dw) == 0) - write_frame = ring_buffer_start; - else - write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); - /* Check invalid write_frame ptr address */ - if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { - DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", - ring_buffer_start, ring_buffer_end, write_frame); - DRM_ERROR("write_frame is pointing to address out of bounds\n"); - return -EINVAL; - } - - /* Initialize KM RB frame */ - memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); - - /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); - write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); - write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); - write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); - write_frame->fence_value = index; - - /* Update the write Pointer in DWORDs */ - psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - if (psp_v3_1_support_vmr_ring(psp)) { - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); - /* send interrupt to PSP for SRIOV ring write pointer update */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_CONSUME_CMD); - } else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); - - return 0; -} - static int psp_v3_1_sram_map(struct amdgpu_device *adev, unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, @@ -641,6 +583,31 @@ static bool psp_v3_1_support_vmr_ring(struct psp_context *psp) return false; } +static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp) +{ + uint32_t data; + struct amdgpu_device *adev = psp->adev; + + if (psp_v3_1_support_vmr_ring(psp)) + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + return data; +} + +static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value) +{ + struct amdgpu_device *adev = psp->adev; + + if (psp_v3_1_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); + /* send interrupt to PSP for SRIOV ring write pointer update */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + 
GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); +} + static const struct psp_funcs psp_v3_1_funcs = { .init_microcode = psp_v3_1_init_microcode, .bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv, @@ -649,11 +616,12 @@ static const struct psp_funcs psp_v3_1_funcs = { .ring_create = psp_v3_1_ring_create, .ring_stop = psp_v3_1_ring_stop, .ring_destroy = psp_v3_1_ring_destroy, - .cmd_submit = psp_v3_1_cmd_submit, .compare_sram_data = psp_v3_1_compare_sram_data, .smu_reload_quirk = psp_v3_1_smu_reload_quirk, .mode1_reset = psp_v3_1_mode1_reset, .support_vmr_ring = psp_v3_1_support_vmr_ring, + .ring_get_wptr = psp_v3_1_ring_get_wptr, + .ring_set_wptr = psp_v3_1_ring_set_wptr, }; void psp_v3_1_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 78452cf0115d..4ef4d31f5231 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, @@ -746,13 +747,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask = 0; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; sdma_v4_0_wait_reg_mem(ring, 0, 1, - adev->nbio_funcs->get_hdp_flush_done_offset(adev), - adev->nbio_funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), ref_and_mask, ref_and_mask, 10); } @@ -1690,102 +1691,17 @@ static int sdma_v4_0_early_init(void *handle) } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry); static int sdma_v4_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = &adev->sdma.ras_if; struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; - struct ras_fs_if fs_info = { - .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", - }; - struct ras_common_if ras_block = { - .block = AMDGPU_RAS_BLOCK__SDMA, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "sdma", - }; - int r, i; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); - return 0; - } - - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__SDMA); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. 
*/ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__SDMA); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info.head = **ras_if; - - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - - amdgpu_ras_debugfs_create(adev, &fs_info); - r = amdgpu_ras_sysfs_create(adev, &fs_info); - if (r) - goto sysfs; -resume: - for (i = 0; i < adev->sdma.num_instances; i++) { - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); - if (r) - goto irq; - } - - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; + return amdgpu_sdma_ras_late_init(adev, &ih_info); } static int sdma_v4_0_sw_init(void *handle) @@ -1857,21 +1773,7 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && - adev->sdma.ras_if) { - struct ras_common_if *ras_if = adev->sdma.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - /*remove fs first*/ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - /*remove the IH*/ - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } + amdgpu_sdma_ras_fini(adev); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); @@ -1891,7 +1793,7 @@ static int sdma_v4_0_hw_init(void *handle) if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) || - adev->asic_type == CHIP_RENOIR) + (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset)) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); if (!amdgpu_sriov_vf(adev)) @@ -2024,52 +1926,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry) { - uint32_t err_source; int instance; + /* When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + goto out; + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); if (instance < 0) - return 0; - - switch (entry->src_id) { - case SDMA0_4_0__SRCID__SDMA_SRAM_ECC: - err_source = 0; - break; - case SDMA0_4_0__SRCID__SDMA_ECC: - err_source = 1; - break; - default: - return 0; - } - - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + goto out; - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_sdma_process_ras_data_cb(adev, err_data, entry); +out: return AMDGPU_RAS_SUCCESS; } -static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->sdma.ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; - - ih_data.head = *ras_if; - - amdgpu_ras_interrupt_dispatch(adev, &ih_data); - 
return 0; -} - static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -2417,7 +2295,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = { static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = { .set = sdma_v4_0_set_ecc_irq_state, - .process = sdma_v4_0_process_ecc_irq, + .process = amdgpu_sdma_process_ecc_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index f6e81680dd7e..f4ad2990f973 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask = 0; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->me == 0) ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0; @@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2); - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); amdgpu_ring_write(ring, ref_and_mask); /* reference */ amdgpu_ring_write(ring, ref_and_mask); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | @@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, 20); if (amdgpu_sriov_vf(adev)) @@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); } +static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + amdgpu_ring_emit_wreg(ring, reg0, ref); + /* wait for a cycle to reset vm_inv_eng*_ack */ + amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0); + amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); +} + static int sdma_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { 6 + /* sdma_v5_0_ring_emit_pipeline_sync */ /* sdma_v5_0_ring_emit_vm_flush */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, @@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .pad_ib = sdma_v5_0_ring_pad_ib, .emit_wreg = sdma_v5_0_ring_emit_wreg, .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait, 
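/*
 * Editorial note (illustrative, not part of the patch): the new
 * emit_reg_write_reg_wait callback registered above turns a "write one
 * register, then poll another" sequence into ring packets, roughly:
 *
 *   WREG reg0 = ref               ; e.g. kick a VM invalidate request
 *   WAIT reg0, mask = 0           ; dummy wait, gives vm_inv_eng*_ack a
 *                                 ; cycle to reset (see comment above)
 *   WAIT (reg1 & mask) == mask    ; poll for the ack
 *
 * The extra wait per flush is why emit_frame_size above doubles the
 * SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT budget.
 */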
.init_cond_exec = sdma_v5_0_ring_init_cond_exec, .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec, .preempt_ib = sdma_v5_0_ring_preempt_ib, diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 493af42152f2..4d415bfdb42f 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {GRBM_STATUS}, + {mmGRBM_STATUS2}, + {mmGRBM_STATUS_SE0}, + {mmGRBM_STATUS_SE1}, + {mmSRBM_STATUS}, + {mmSRBM_STATUS2}, + {DMA_STATUS_REG + DMA0_REGISTER_OFFSET}, + {DMA_STATUS_REG + DMA1_REGISTER_OFFSET}, + {mmCP_STAT}, + {mmCP_STALLED_STAT1}, + {mmCP_STALLED_STAT2}, + {mmCP_STALLED_STAT3}, {GB_ADDR_CONFIG}, {MC_ARB_RAMCFG}, {GB_TILE_MODE0}, @@ -1186,6 +1197,11 @@ static int si_asic_reset(struct amdgpu_device *adev) return 0; } +static bool si_asic_supports_baco(struct amdgpu_device *adev) +{ + return false; +} + static enum amd_reset_method si_asic_reset_method(struct amdgpu_device *adev) { @@ -1414,6 +1430,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .get_pcie_usage = &si_get_pcie_usage, .need_reset_on_init = &si_need_reset_on_init, .get_pcie_replay_count = &si_get_pcie_replay_count, + .supports_baco = &si_asic_supports_baco, }; static uint32_t si_get_rev_id(struct amdgpu_device *adev) @@ -1633,7 +1650,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev) static void si_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -1668,12 +1684,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(adev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev)) return; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { @@ -1682,14 +1693,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL, + &gpu_cfg); tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16); tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL, + tmp16); tmp = RREG32_PCIE(PCIE_LC_STATUS1); max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; @@ -1706,15 +1720,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) } for (i = 0; i < 10; i++) { - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(adev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL, + 
&gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp |= LC_SET_QUIESCE; @@ -1726,25 +1748,44 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) mdelay(100); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, + tmp16); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); - - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_write_word(adev->pdev, + PCI_EXP_LNKCTL, + tmp16); + + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(adev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(adev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -1757,15 +1798,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - tmp16 |= 3; + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - tmp16 |= 2; + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; - pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 57bb5f9e08b2..88ae27a5a03d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev) u32 interrupt_cntl, ih_cntl, ih_rb_cntl; si_ih_disable_interrupts(adev); - WREG32(INTERRUPT_CNTL2, 
adev->irq.ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f8ab80c8801b..5bd6ae7a52fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -58,13 +58,18 @@ #include "mmhub_v1_0.h" #include "df_v1_7.h" #include "df_v3_6.h" +#include "nbio_v6_1.h" +#include "nbio_v7_0.h" +#include "nbio_v7_4.h" #include "vega10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" #include "vce_v4_0.h" #include "vcn_v1_0.h" #include "vcn_v2_0.h" +#include "jpeg_v2_0.h" #include "vcn_v2_5.h" +#include "jpeg_v2_5.h" #include "dce_virtual.h" #include "mxgpu_ai.h" #include "amdgpu_smu.h" @@ -91,8 +96,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u32 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -106,8 +111,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -121,8 +126,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u64 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* read low 32 bit */ @@ -142,8 +147,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* write low 32 bit */ @@ -262,7 +267,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static u32 soc15_get_config_memsize(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_memsize(adev); + return adev->nbio.funcs->get_memsize(adev); } static u32 soc15_get_xclk(struct amdgpu_device *adev) @@ -336,6 +341,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, @@ -461,7 +467,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = 
adev->nbio_funcs->get_memsize(adev); + u32 memsize = adev->nbio.funcs->get_memsize(adev); if (memsize != 0xffffffff) break; @@ -475,42 +481,72 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { - *cap = false; - return -ENOENT; - } + *cap = smu_baco_is_support(smu); + return 0; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - return pp_funcs->get_asic_baco_capability(pp_handle, cap); + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } + + return pp_funcs->get_asic_baco_capability(pp_handle, cap); + } } static int soc15_asic_baco_reset(struct amdgpu_device *adev) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; + /* avoid NBIF getting stuck when doing RAS recovery in BACO reset */ + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; + dev_info(adev->dev, "GPU BACO reset\n"); - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; + int ret; - dev_info(adev->dev, "GPU BACO reset\n"); + ret = smu_baco_enter(smu); + if (ret) + return ret; + + ret = smu_baco_exit(smu); + if (ret) + return ret; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + } - adev->in_baco_reset = 1; + /* re-enable doorbell interrupt after BACO exit */ + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); return 0; } static int soc15_mode2_reset(struct amdgpu_device *adev) { + if (is_support_sw_smu(adev)) + return smu_mode2_reset(&adev->smu); if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->asic_reset_mode_2) return -ENOENT; @@ -521,30 +557,30 @@ static int soc15_mode2_reset(struct amdgpu_device *adev) static enum amd_reset_method soc15_asic_reset_method(struct amdgpu_device *adev) { - bool baco_reset; + bool baco_reset = false; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_RENOIR: return AMD_RESET_METHOD_MODE2; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_ARCTURUS: soc15_asic_get_baco_capability(adev, &baco_reset); break; case CHIP_VEGA20: if (adev->psp.sos_fw_version >= 0x80067) soc15_asic_get_baco_capability(adev, &baco_reset); - else - baco_reset = false; - if (baco_reset) { - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (hive || (ras && ras->supported)) - baco_reset = false; 
- } + /* + * 1. PMFW version > 0x284300: all cases use baco + * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco + */ + if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400) + baco_reset = false; break; default: - baco_reset = false; break; } @@ -570,6 +606,28 @@ static int soc15_asic_reset(struct amdgpu_device *adev) } } +static bool soc15_supports_baco(struct amdgpu_device *adev) +{ + bool baco_support; + + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA12: + soc15_asic_get_baco_capability(adev, &baco_support); + break; + case CHIP_VEGA20: + if (adev->psp.sos_fw_version >= 0x80067) + soc15_asic_get_baco_capability(adev, &baco_support); + else + baco_support = false; + break; + default: + return false; + } + + return baco_support; +} + /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, u32 cntl_reg, u32 status_reg) { @@ -626,8 +684,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev) static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } static const struct amdgpu_ip_block_version vega10_common_ip_block = @@ -641,7 +699,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block = static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_rev_id(adev); + return adev->nbio.funcs->get_rev_id(adev); } int soc15_set_ip_blocks(struct amdgpu_device *adev) @@ -667,13 +725,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->gmc.xgmi.supported = true; - if (adev->flags & AMD_IS_APU) - adev->nbio_funcs = &nbio_v7_0_funcs; - else if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) - adev->nbio_funcs = &nbio_v7_4_funcs; - else - adev->nbio_funcs = &nbio_v6_1_funcs; + if (adev->flags & AMD_IS_APU) { + adev->nbio.funcs = &nbio_v7_0_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; + } else if (adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS) { + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; + } else { + adev->nbio.funcs = &nbio_v6_1_funcs; + adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; + } if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->df_funcs = &df_v3_6_funcs; @@ -681,7 +743,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->df_funcs = &df_v1_7_funcs; adev->rev_id = soc15_get_rev_id(adev); - adev->nbio_funcs->detect_hw_virt(adev); + adev->nbio.funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_ai_virt_ops; @@ -713,11 +775,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) } amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (!amdgpu_sriov_vf(adev)) { - if (is_support_sw_smu(adev)) + if (is_support_sw_smu(adev)) { + if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - else - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + } else { + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); } if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -750,13 +812,28 @@ int 
soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_ARCTURUS: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + + if (amdgpu_sriov_vf(adev)) { + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + } else { + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + } + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + + if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); break; case CHIP_RENOIR: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); @@ -775,6 +852,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL; @@ -785,7 +863,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - adev->nbio_funcs->hdp_flush(adev, ring); + adev->nbio.funcs->hdp_flush(adev, ring); } static void soc15_invalidate_hdp(struct amdgpu_device *adev, @@ -953,6 +1031,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .get_pcie_usage = &soc15_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, + .supports_baco = &soc15_supports_baco, }; static const struct amdgpu_asic_funcs vega20_asic_funcs = @@ -961,6 +1040,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .read_bios_from_rom = &soc15_read_bios_from_rom, .read_register = &soc15_read_register, .reset = &soc15_asic_reset, + .reset_method = &soc15_asic_reset_method, .set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, @@ -973,7 +1053,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_pcie_usage = &vega20_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, - .reset_method = &soc15_asic_reset_method + .supports_baco = &soc15_supports_baco, }; static int soc15_common_early_init(void *handle) @@ -1157,7 +1237,10 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_MC_MGCG | - AMD_CG_SUPPORT_MC_LS; + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x32; break; @@ -1178,19 +1261,16 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG | AMD_CG_SUPPORT_IH_CG | AMD_CG_SUPPORT_ATHUB_LS | 
AMD_CG_SUPPORT_ATHUB_MGCG | AMD_CG_SUPPORT_DF_MGCG; adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x91; - - if (adev->pm.pp_feature & PP_GFXOFF_MASK) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; break; default: /* FIXME: not supported yet */ @@ -1208,11 +1288,15 @@ static int soc15_common_early_init(void *handle) static int soc15_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r = 0; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - return 0; + if (adev->nbio.funcs->ras_late_init) + r = adev->nbio.funcs->ras_late_init(adev); + + return r; } static int soc15_common_sw_init(void *handle) @@ -1229,6 +1313,10 @@ static int soc15_common_sw_init(void *handle) static int soc15_common_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_nbio_ras_fini(adev); + adev->df_funcs->sw_fini(adev); return 0; } @@ -1241,12 +1329,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - adev->nbio_funcs->sdma_doorbell_range(adev, i, + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, adev->doorbell_index.sdma_doorbell_range); } - adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, + adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index); } } @@ -1260,13 +1348,13 @@ static int soc15_common_hw_init(void *handle) /* enable aspm */ soc15_program_aspm(adev); /* setup nbio registers */ - adev->nbio_funcs->init_registers(adev); + adev->nbio.funcs->init_registers(adev); /* remap HDP registers to a hole in mmio space, * for the purpose of expose those registers * to process space */ - if (adev->nbio_funcs->remap_hdp_registers) - adev->nbio_funcs->remap_hdp_registers(adev); + if (adev->nbio.funcs->remap_hdp_registers) + adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); @@ -1289,6 +1377,14 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); + if (adev->nbio.ras_if && + amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { + if (adev->nbio.funcs->init_ras_controller_interrupt) + amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) + amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); + } + return 0; } @@ -1429,9 +1525,9 @@ static int soc15_common_set_clockgating_state(void *handle, case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); soc15_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -1446,9 +1542,9 @@ static int soc15_common_set_clockgating_state(void *handle, break; case CHIP_RAVEN: case CHIP_RENOIR: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); soc15_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -1477,7 +1573,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio_funcs->get_clockgating_state(adev, flags); + adev->nbio.funcs->get_clockgating_state(adev, flags); /* AMD_CG_SUPPORT_HDP_LS */ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index a3dde0c31f57..d0fb7a67c1a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -28,8 +28,8 @@ #include "nbio_v7_0.h" #include "nbio_v7_4.h" -#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4 -#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1 +#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 +#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 extern const struct amd_ip_funcs soc15_common_ip_funcs; @@ -60,6 +60,18 @@ struct soc15_allowed_register_entry { bool grbm_indexed; }; +struct soc15_ras_field_entry { + const char *name; + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; + uint32_t sec_count_mask; + uint32_t sec_count_shift; + uint32_t ded_count_mask; + uint32_t ded_count_shift; +}; + #define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset) @@ -67,6 +79,8 @@ struct soc15_allowed_register_entry { #define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask } +#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT + void soc15_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int soc15_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 839f186e1182..19e870c79896 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -52,6 +52,7 @@ uint32_t old_ = 0; \ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ uint32_t loop = adev->usec_timeout; \ + ret = 0; \ while ((tmp_ & (mask)) != (expected_value)) { \ if (old_ != tmp_) { \ loop = adev->usec_timeout; \ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c new file mode 100644 index 000000000000..0d6b50528d76 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "umc_v6_0.h" +#include "amdgpu.h" + +static void umc_v6_0_init_registers(struct amdgpu_device *adev) +{ + unsigned i,j; + + for (i = 0; i < 4; i++) + for (j = 0; j < 4; j++) + WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002); +} + +const struct amdgpu_umc_funcs umc_v6_0_funcs = { + .init_registers = umc_v6_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h new file mode 100644 index 000000000000..109f1a57a46e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __UMC_V6_0_H__ +#define __UMC_V6_0_H__ + +#include "soc15_common.h" +#include "amdgpu.h" + +extern const struct amdgpu_umc_funcs umc_v6_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 8502e736f721..515eb50cd0f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -31,6 +31,14 @@ #define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 +/* UMC 6_1_2 register offsets */ +#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360 +#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1 +#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361 +#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1 + /* * (addr / 256) * 8192, the higher 26 bits in ErrorAddr * is the index of 8KB block @@ -75,6 +83,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) RSMU_UMC_INDEX_MODE_EN, 0); } +static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev) +{ + uint32_t rsmu_umc_index; + + rsmu_umc_index = RREG32_SOC15(RSMU, 0, + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + return REG_GET_FIELD(rsmu_umc_index, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_INSTANCE); +} + static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset, unsigned long *error_count) @@ -84,12 +103,25 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, uint64_t mc_umc_status; uint32_t mc_umc_status_addr; - ecc_err_cnt_sel_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); - ecc_err_cnt_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* select the lower chip and check the error count */ ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); @@ -130,8 +162,17 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev uint64_t mc_umc_status; uint32_t mc_umc_status_addr; - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* check the MCUMC_STATUS */ mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); @@ -165,10 +206,20 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, uint32_t umc_reg_offset, uint32_t channel_index) { uint32_t lsb, mc_umc_status_addr; - uint64_t mc_umc_status, err_addr; + uint64_t mc_umc_status, err_addr, retired_page; + struct eeprom_table_record *err_rec; + + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, 
mmMCA_UMC_UMC0_MCUMC_STATUST0); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); + } else { + /* UMC 6_1_1 registers */ + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + } /* skip error address process if -ENOMEM */ if (!err_data->err_addr) { @@ -177,6 +228,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, return; } + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); /* calculate error address if ue/ce error is detected */ @@ -191,12 +243,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, err_addr &= ~((0x1ULL << lsb) - 1); /* translate umc channel address to soc pa, 3 parts are included */ - err_data->err_addr[err_data->err_addr_cnt] = - ADDR_OF_8KB_BLOCK(err_addr) | - ADDR_OF_256B_BLOCK(channel_index) | - OFFSET_IN_256B_BLOCK(err_addr); - - err_data->err_addr_cnt++; + retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev); + + err_data->err_addr_cnt++; + } } /* clear umc status */ @@ -209,17 +273,28 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); } -static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, +static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, struct ras_err_data *err_data, uint32_t umc_reg_offset, uint32_t channel_index) { uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; uint32_t ecc_err_cnt_addr; - ecc_err_cnt_sel_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); - ecc_err_cnt_addr = - SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + if (adev->asic_type == CHIP_ARCTURUS) { + /* UMC 6_1_2 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); + } else { + /* UMC 6_1_1 registers */ + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + } /* select the lower chip and check the error count */ ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); @@ -239,15 +314,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); } -static void umc_v6_1_ras_init(struct amdgpu_device *adev) +static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) { void *ras_error_status = NULL; - amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel); + amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel); } const struct amdgpu_umc_funcs umc_v6_1_funcs = { - .ras_init = umc_v6_1_ras_init, + .err_cnt_init = umc_v6_1_err_cnt_init, + .ras_late_init = amdgpu_umc_ras_late_init, .query_ras_error_count = umc_v6_1_query_ras_error_count, .query_ras_error_address = umc_v6_1_query_ras_error_address, 
.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h index dab9cbd292c5..0ce1d323cfdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -35,7 +35,8 @@ /* total channel instances in one umc block */ #define UMC_V6_1_TOTAL_CHANNEL_NUM (UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM) /* UMC register per channel offset */ -#define UMC_V6_1_PER_CHANNEL_OFFSET 0x800 +#define UMC_V6_1_PER_CHANNEL_OFFSET_VG20 0x800 +#define UMC_V6_1_PER_CHANNEL_OFFSET_ARCT 0x400 /* EccErrCnt max value */ #define UMC_V6_1_CE_CNT_MAX 0xffff diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 670784a78512..217084d56ab8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) * Open up a stream for HW test */ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00010000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ @@ -268,13 +269,14 @@ err: */ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00010000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ @@ -327,13 +329,20 @@ err: static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; - r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; - r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence); + r = 
uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence); if (r) goto error; @@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 01f658fa72c6..0995378d8263 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) * Open up a stream for HW test */ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00000000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ @@ -275,13 +276,14 @@ err: * Close up a stream for HW test or if userspace failed to do so */ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00000000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; @@ -334,13 +336,20 @@ err: static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; - r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; - r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence); + r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence); if (r) goto error; @@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 
93b3500e522b..652cecc030b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -36,6 +36,7 @@ #include "mmhub/mmhub_9_1_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" +#include "jpeg_v1_0.h" #define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 @@ -45,9 +46,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); -static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, struct dpg_pause_state *new_state); @@ -68,9 +67,10 @@ static int vcn_v1_0_early_init(void *handle) vcn_v1_0_set_dec_ring_funcs(adev); vcn_v1_0_set_enc_ring_funcs(adev); - vcn_v1_0_set_jpeg_ring_funcs(adev); vcn_v1_0_set_irq_funcs(adev); + jpeg_v1_0_early_init(handle); + return 0; } @@ -101,11 +101,6 @@ static int vcn_v1_0_sw_init(void *handle) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; @@ -149,17 +144,11 @@ static int vcn_v1_0_sw_init(void *handle) return r; } - ring = &adev->vcn.inst->ring_jpeg; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); - if (r) - return r; - adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch = - SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); - return 0; + r = jpeg_v1_0_sw_init(handle); + + return r; } /** @@ -178,6 +167,8 @@ static int vcn_v1_0_sw_fini(void *handle) if (r) return r; + jpeg_v1_0_sw_fini(handle); + r = amdgpu_vcn_sw_fini(adev); return r; @@ -202,13 +193,12 @@ static int vcn_v1_0_hw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = true; r = amdgpu_ring_test_helper(ring); if (r) goto done; } - ring = &adev->vcn.inst->ring_jpeg; + ring = &adev->jpeg.inst->ring_dec; r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -948,22 +938,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); - ring = &adev->vcn.inst->ring_jpeg; - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | - UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - - /* initialize wptr */ - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - /* copy patch commands to the jpeg ring */ - vcn_v1_0_jpeg_ring_set_patch_ring(ring, - (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + jpeg_v1_0_start(adev, 0); return 0; } @@ -1107,13 +1082,7 @@ static int vcn_v1_0_start_dpg_mode(struct 
amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - /* initialize JPEG wptr */ - ring = &adev->vcn.inst->ring_jpeg; - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - /* copy patch commands to the jpeg ring */ - vcn_v1_0_jpeg_ring_set_patch_ring(ring, - (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + jpeg_v1_0_start(adev, 1); return 0; } @@ -1317,7 +1286,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code); /* Restore */ - ring = &adev->vcn.inst->ring_jpeg; + ring = &adev->jpeg.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | @@ -1717,389 +1686,6 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } - -/** - * vcn_v1_0_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v1_0_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v1_0_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); -} - -/** - * vcn_v1_0_jpeg_ring_insert_start - insert a start command - * - * @ring: amdgpu_ring pointer - * - * Write a start command to the ring. - */ -static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80010000); -} - -/** - * vcn_v1_0_jpeg_ring_insert_end - insert a end command - * - * @ring: amdgpu_ring pointer - * - * Write a end command to the ring. - */ -static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00010000); -} - -/** - * vcn_v1_0_jpeg_ring_emit_fence - emit an fence & trap command - * - * @ring: amdgpu_ring pointer - * @fence: fence to emit - * - * Write a fence and a trap command to the ring. 
- */ -static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) -{ - struct amdgpu_device *adev = ring->adev; - - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0xffffffff); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - - /* emit trap */ - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); - amdgpu_ring_write(ring, 0); -} - -/** - * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer - * - * @ring: amdgpu_ring pointer - * @ib: indirect buffer to execute - * - * Write ring commands to execute the indirect buffer. 
- */ -static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) -{ - struct amdgpu_device *adev = ring->adev; - unsigned vmid = AMDGPU_JOB_GET_VMID(job); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, ib->length_dw); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, - PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); - amdgpu_ring_write(ring, 0x2); -} - -static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val, - uint32_t mask) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, val); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE3)); - } - amdgpu_ring_write(ring, mask); -} - -static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) -{ - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t data0, data1, mask; - - pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); - - /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; - data1 = lower_32_bits(pd_addr); - mask = 0xffffffff; - vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); -} - -static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, - uint32_t 
reg, uint32_t val) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0)); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, - PACKETJ(0, 0, 0, PACKETJ_TYPE0)); - } - amdgpu_ring_write(ring, val); -} - -static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) -{ - int i; - - WARN_ON(ring->wptr % 2 || count % 2); - - for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } -} - -static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val) -{ - struct amdgpu_device *adev = ring->adev; - ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - ring->ring[(*ptr)++] = 0; - ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); - } else { - ring->ring[(*ptr)++] = reg_offset; - ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); - } - ring->ring[(*ptr)++] = val; -} - -static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) -{ - struct amdgpu_device *adev = ring->adev; - - uint32_t reg, reg_offset, val, mask, i; - - // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW); - reg_offset = (reg << 2); - val = lower_32_bits(ring->gpu_addr); - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH); - reg_offset = (reg << 2); - val = upper_32_bits(ring->gpu_addr); - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 3rd to 5th: issue MEM_READ commands - for (i = 0; i <= 2; i++) { - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2); - ring->ring[ptr++] = 0; - } - - // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x13; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 7th: program mmUVD_JRBC_RB_REF_DATA - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA); - reg_offset = (reg << 2); - val = 0x1; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x1; - mask = 0x1; - - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0); - ring->ring[ptr++] = 0x01400200; - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0); - ring->ring[ptr++] = val; - ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0); - if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) || - ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) { - ring->ring[ptr++] = 0; - ring->ring[ptr++] = 
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3); - } else { - ring->ring[ptr++] = reg_offset; - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3); - } - ring->ring[ptr++] = mask; - - //9th to 21st: insert no-op - for (i = 0; i <= 12; i++) { - ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); - ring->ring[ptr++] = 0; - } - - //22nd: reset mmUVD_JRBC_RB_RPTR - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR); - reg_offset = (reg << 2); - val = 0; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); - - //23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch - reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL); - reg_offset = (reg << 2); - val = 0x12; - vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val); -} - static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -2124,9 +1710,6 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev, case 120: amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; - case 126: - amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2260,41 +1843,6 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .nop = PACKET0(0x81ff, 0), - .support_64bit_ptrs = false, - .no_user_fence = true, - .vmhub = AMDGPU_MMHUB_0, - .extra_dw = 64, - .get_rptr = vcn_v1_0_jpeg_ring_get_rptr, - .get_wptr = vcn_v1_0_jpeg_ring_get_wptr, - .set_wptr = vcn_v1_0_jpeg_ring_set_wptr, - .emit_frame_size = - 6 + 6 + /* hdp invalidate / flush */ - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */ - 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */ - 6, - .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v1_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v1_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v1_0_jpeg_ring_nop, - .insert_start = vcn_v1_0_jpeg_ring_insert_start, - .insert_end = vcn_v1_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs; @@ -2311,12 +1859,6 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } -static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); -} - static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { .set = vcn_v1_0_set_interrupt_state, .process = vcn_v1_0_process_interrupt, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 36ad0c0e8efb..d76ece38c97b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -47,26 +47,6 @@ #define 
mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2 -#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff -#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 -#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a -#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb -#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf -#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 -#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed -#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 -#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 -#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 - #define mmUVD_RBC_XX_IB_REG_CHECK 0x026b #define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 #define mmUVD_REG_XX_MASK 0x026c @@ -74,7 +54,6 @@ static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); @@ -97,7 +76,6 @@ static int vcn_v2_0_early_init(void *handle) vcn_v2_0_set_dec_ring_funcs(adev); vcn_v2_0_set_enc_ring_funcs(adev); - vcn_v2_0_set_jpeg_ring_funcs(adev); vcn_v2_0_set_irq_funcs(adev); return 0; @@ -132,12 +110,6 @@ static int vcn_v2_0_sw_init(void *handle) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; @@ -194,19 +166,8 @@ static int vcn_v2_0_sw_init(void *handle) return r; } - ring = &adev->vcn.inst->ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); - if (r) - return r; - adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); - return 0; } @@ -244,32 +205,18 @@ static int vcn_v2_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } - } - - ring = &adev->vcn.inst->ring_jpeg; - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; - goto 
done; } done: @@ -305,9 +252,6 @@ static int vcn_v2_0_hw_fini(void *handle) ring->sched.ready = false; } - ring = &adev->vcn.inst->ring_jpeg; - ring->sched.ready = false; - return 0; } @@ -402,7 +346,6 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config); } static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect) @@ -657,129 +600,6 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev, } /** - * jpeg_v2_0_start - start JPEG block - * - * @adev: amdgpu_device pointer - * - * Setup and start the JPEG block - */ -static int jpeg_v2_0_start(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg; - uint32_t tmp; - int r = 0; - - /* disable power gating */ - tmp = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp); - - SOC15_WAIT_ON_RREG(VCN, 0, - mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON, - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); - return r; - } - - /* Removing the anti hang mechanism to indicate the UVDJ tile is ON */ - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JPEG_ENC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - - return 0; -} - -/** - * jpeg_v2_0_stop - stop JPEG block - * - * @adev: amdgpu_device pointer - * - * stop the JPEG block - */ -static int jpeg_v2_0_stop(struct amdgpu_device *adev) -{ - uint32_t tmp; - int r = 0; - - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable JPEG CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, 
mmJPEG_CGC_CTRL, tmp); - - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JPEG_ENC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable power gating */ - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS)); - tmp &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK; - tmp |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), tmp); - - tmp = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG), tmp); - - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, - (2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT), - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r); - - if (r) { - DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); - return r; - } - - return r; -} - -/** * vcn_v2_0_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer @@ -1061,12 +881,8 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - r = vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram); - if (r) - return r; - goto jpeg; - } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram); vcn_v2_0_disable_static_power_gating(adev); @@ -1218,10 +1034,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); -jpeg: - r = jpeg_v2_0_start(adev); - - return r; + return 0; } static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev) @@ -1240,9 +1053,6 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_device *adev) tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); - tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); - tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); @@ -1261,10 +1071,6 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev) uint32_t tmp; int r; - r = jpeg_v2_0_stop(adev); - if (r) - return r; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v2_0_stop_dpg_mode(adev); if (r) @@ -1790,272 +1596,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_ amdgpu_ring_write(ring, val); } -/** - * vcn_v2_0_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v2_0_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; - else - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v2_0_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void 
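The power transitions in jpeg_v2_0_start()/jpeg_v2_0_stop() above are the same write-then-poll handshake in both directions: request a state through UVD_PGFSM_CONFIG, then spin on UVD_PGFSM_STATUS until the power state machine acknowledges it. Reduced to a sketch (register and field names as in the removed code; a config value of 1 powers the UVDJ tile up, 2 powers it down):

	static int jpeg_v2_0_pgfsm_transition(struct amdgpu_device *adev,
					      uint32_t pwr_config, uint32_t wanted)
	{
		int r = 0;

		WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_PGFSM_CONFIG),
		       pwr_config << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);

		/* sets r non-zero if the status never matches within the timeout */
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, wanted,
				   UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK, r);

		return r;
	}
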
vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); - } else { - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); - } -} - -/** - * vcn_v2_0_jpeg_ring_insert_start - insert a start command - * - * @ring: amdgpu_ring pointer - * - * Write a start command to the ring. - */ -void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x80010000); -} - -/** - * vcn_v2_0_jpeg_ring_insert_end - insert a end command - * - * @ring: amdgpu_ring pointer - * - * Write a end command to the ring. - */ -void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) -{ - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x68e04); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x00010000); -} - -/** - * vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command - * - * @ring: amdgpu_ring pointer - * @fence: fence to emit - * - * Write a fence and a trap command to the ring. - */ -void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) -{ - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, seq); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x3fbc); - - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x1); - - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); - amdgpu_ring_write(ring, 0); -} - -/** - * vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer - * - * @ring: amdgpu_ring pointer - * @ib: indirect buffer to execute - * - * Write ring commands to execute the indirect buffer. 
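All of these emit helpers build their command stream out of PACKETJ() words. The encoder (from soc15d.h) packs a register offset, a register-select field, a condition code and the packet type into a single dword, roughly:

	/* Approximate layout of a JPEG ring packet header; see soc15d.h
	 * for the authoritative definition.
	 */
	#define PACKETJ(reg, r, cond, type)	(((reg)  & 0x3FFFF)      | \
						 (((r)   & 0x3F) << 18)  | \
						 (((cond) & 0xF) << 24)  | \
						 (((type) & 0xF) << 28))

so PACKETJ(..., PACKETJ_TYPE0) is a register write whose data dword follows, while the other types used above (TYPE3, TYPE6, TYPE7) cover the conditional polls, nops and the trap at the end of the fence sequence.
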
- */ -void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) -{ - unsigned vmid = AMDGPU_JOB_GET_VMID(job); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, (vmid | (vmid << 4))); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, ib->length_dw); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr)); - - amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2)); - amdgpu_ring_write(ring, 0); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET, - 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3)); - amdgpu_ring_write(ring, 0x2); -} - -void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, 0x01400200); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - amdgpu_ring_write(ring, val); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE3)); - } - amdgpu_ring_write(ring, mask); -} - -void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) -{ - struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; - uint32_t data0, data1, mask; - - pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); - - /* wait for register write */ - data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2; - data1 = lower_32_bits(pd_addr); - mask = 0xffffffff; - vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); -} - -void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) -{ - uint32_t reg_offset = (reg << 2); - - amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, - 0, 0, PACKETJ_TYPE0)); - if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) { - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, - 
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0)); - } else { - amdgpu_ring_write(ring, reg_offset); - amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, - 0, 0, PACKETJ_TYPE0)); - } - amdgpu_ring_write(ring, val); -} - -void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) -{ - int i; - - WARN_ON(ring->wptr % 2 || count % 2); - - for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6)); - amdgpu_ring_write(ring, 0); - } -} - static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned type, @@ -2080,9 +1620,6 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2228,36 +1765,6 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_0, - .get_rptr = vcn_v2_0_jpeg_ring_get_rptr, - .get_wptr = vcn_v2_0_jpeg_ring_get_wptr, - .set_wptr = vcn_v2_0_jpeg_ring_set_wptr, - .emit_frame_size = - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ - 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ - 8 + 16, - .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v2_0_jpeg_ring_nop, - .insert_start = vcn_v2_0_jpeg_ring_insert_start, - .insert_end = vcn_v2_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs; @@ -2274,12 +1781,6 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } -static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); -} - static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { .set = vcn_v2_0_set_interrupt_state, .process = vcn_v2_0_process_interrupt, @@ -2287,7 +1788,7 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index 8467292f32e5..ef749b02ded9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -49,19 +49,6 @@ extern 
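A detail worth annotating in the removed emit_wreg/emit_reg_wait helpers above: the JPEG block can evidently decode only a window of the register space directly, so the code splits on the byte offset. Registers inside the reachable 0x10000-0x105ff byte window are named in the packet itself; everything else is bounced through the external-register address/data pair:

	uint32_t reg_offset = (reg << 2);	/* dword index -> byte offset */

	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
					0, 0, PACKETJ_TYPE0));
	if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
		/* reachable window: clear the indirect address, then
		 * address the target register directly in the packet */
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		/* out of the window: latch the byte offset into the
		 * external-reg address, then write via the data port */
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
						0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
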
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr); extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); -extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring); -extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring); -extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags); -extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_ib *ib, uint32_t flags); -extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask); -extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr); -extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); -extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count); - extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block; #endif /* __VCN_V2_0_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 395c2259f979..f67fca38c1a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_vcn.h" +#include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" #include "vcn_v2_0.h" @@ -46,13 +47,10 @@ #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 +#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); -static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_5_set_powergating_state(void *handle, enum amd_powergating_state state); @@ -94,7 +92,6 @@ static int vcn_v2_5_early_init(void *handle) vcn_v2_5_set_dec_ring_funcs(adev); vcn_v2_5_set_enc_ring_funcs(adev); - vcn_v2_5_set_jpeg_ring_funcs(adev); vcn_v2_5_set_irq_funcs(adev); return 0; @@ -129,12 +126,6 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; } - - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq); - if (r) - return r; } r = amdgpu_vcn_sw_init(adev); @@ -183,9 +174,6 @@ static int vcn_v2_5_sw_init(void *handle) adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP); - adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH); - ring = &adev->vcn.inst[j].ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j; @@ -203,14 +191,6 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; } - - ring = &adev->vcn.inst[j].ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j; - sprintf(ring->name, "vcn_jpeg_%d", j); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); - if (r) - return r; } return 0; @@ -255,33 +235,21 @@ static int vcn_v2_5_hw_init(void *handle) continue; ring = &adev->vcn.inst[j].ring_dec; - 
adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, j); - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst[j].ring_enc[i]; - ring->sched.ready = false; - continue; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } - } - - ring = &adev->vcn.inst[j].ring_jpeg; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; - goto done; } } + done: if (!r) DRM_INFO("VCN decode and encode initialized successfully.\n"); @@ -300,7 +268,7 @@ static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i; + int i, j; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -312,13 +280,10 @@ static int vcn_v2_5_hw_fini(void *handle) ring->sched.ready = false; - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[i].ring_enc[i]; + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + ring = &adev->vcn.inst[i].ring_enc[j]; ring->sched.ready = false; } - - ring = &adev->vcn.inst[i].ring_jpeg; - ring->sched.ready = false; } return 0; @@ -423,7 +388,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) * vcn_v2_5_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Disable clock gating for VCN block */ @@ -542,7 +506,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) * vcn_v2_5_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Enable clock gating for VCN block */ @@ -601,121 +564,15 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) } } -/** - * jpeg_v2_5_start - start JPEG block - * - * @adev: amdgpu_device pointer - * - * Setup and start the JPEG block - */ -static int jpeg_v2_5_start(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - uint32_t tmp; - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ring = &adev->vcn.inst[i].ring_jpeg; - /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); - tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK - | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK - | JPEG_CGC_CTRL__JMCIF_MODE_MASK - | JPEG_CGC_CTRL__JRBBM_MODE_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); - - /* MJPEG global tiling registers */ - WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - /* enable JMI channel */ - 
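One subtlety in the vcn_v2_5_hw_fini() hunk above: besides dropping the JPEG ring, it fixes a latent indexing bug. The old inner loop reused the instance counter i as the encode-ring index, i.e. effectively:

	/* before: instance index and ring index conflated */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst[i].ring_enc[i].sched.ready = false;

	/* after: a separate counter j walks the encode rings of instance i */
	for (j = 0; j < adev->vcn.num_enc_rings; ++j)
		adev->vcn.inst[i].ring_enc[j].sched.ready = false;
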
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR); - } - - return 0; -} - -/** - * jpeg_v2_5_stop - stop JPEG block - * - * @adev: amdgpu_device pointer - * - * stop the JPEG block - */ -static int jpeg_v2_5_stop(struct amdgpu_device *adev) -{ - uint32_t tmp; - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); - - /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - } - - return 0; -} - static int vcn_v2_5_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; int i, j, k, r; + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; @@ -880,19 +737,14 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4); } - r = jpeg_v2_5_start(adev); - return r; + return 0; } static int vcn_v2_5_stop(struct amdgpu_device *adev) { uint32_t tmp; - int i, r; - - r = jpeg_v2_5_stop(adev); - if (r) - return r; + int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -946,6 +798,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + return 0; } @@ -1128,86 +983,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -/** - * vcn_v2_5_jpeg_ring_get_rptr - get read pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware read pointer - */ -static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR); -} - -/** - * vcn_v2_5_jpeg_ring_get_wptr - get write pointer - * - * @ring: amdgpu_ring pointer - * - * Returns the current hardware write pointer - */ -static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) - return adev->wb.wb[ring->wptr_offs]; 
- else - return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR); -} - -/** - * vcn_v2_5_jpeg_ring_set_wptr - set write pointer - * - * @ring: amdgpu_ring pointer - * - * Commits the write pointer to the hardware - */ -static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - - if (ring->use_doorbell) { - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); - WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); - } else { - WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); - } -} - -static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { - .type = AMDGPU_RING_TYPE_VCN_JPEG, - .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB_1, - .get_rptr = vcn_v2_5_jpeg_ring_get_rptr, - .get_wptr = vcn_v2_5_jpeg_ring_get_wptr, - .set_wptr = vcn_v2_5_jpeg_ring_set_wptr, - .emit_frame_size = - SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ - 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ - 8 + 16, - .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ - .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, - .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, - .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_jpeg_ring_test_ring, - .test_ib = amdgpu_vcn_jpeg_ring_test_ib, - .insert_nop = vcn_v2_0_jpeg_ring_nop, - .insert_start = vcn_v2_0_jpeg_ring_insert_start, - .insert_end = vcn_v2_0_jpeg_ring_insert_end, - .pad_ib = amdgpu_ring_generic_pad_ib, - .begin_use = amdgpu_vcn_ring_begin_use, - .end_use = amdgpu_vcn_ring_end_use, - .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, - .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, - .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, -}; - static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) { int i; @@ -1236,19 +1011,6 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) } } -static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; - adev->vcn.inst[i].ring_jpeg.me = i; - DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i); - } -} - static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1355,9 +1117,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -1379,7 +1138,7 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; } } diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 9eae3536ddad..5cb7e231de5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) /* disable irqs */ 
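The JPEG get/set-wptr helpers removed above follow the standard amdgpu doorbell pattern, which is worth spelling out once: with doorbells enabled, the write pointer is mirrored into the GPU-visible writeback buffer (so get_wptr never touches MMIO) and the engine is kicked through its doorbell; otherwise both sides fall back to the ring's MMIO WPTR register:

	static void jpeg_ring_set_wptr(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;

		if (ring->use_doorbell) {
			/* publish the new wptr where get_wptr() reads it back */
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			/* ring the doorbell to notify the engine */
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR,
				     lower_32_bits(ring->wptr));
		}
	}
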
vega10_ih_disable_interrupts(adev); - adev->nbio_funcs->ih_control(adev); + adev->nbio.funcs->ih_control(adev); ih = &adev->irq.ih; /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ @@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle) return 0; } +static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def, field_val; + + if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) { + def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL); + field_val = enable ? 0 : 1; + /** + * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE + * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. + */ + if (adev->asic_type > CHIP_VEGA10) { + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val); + } + + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DYN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + REG_CLK_SOFT_OVERRIDE, field_val); + if (def != data) + WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data); + } +} + static int vega10_ih_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vega10_ih_update_clockgating_state(adev, + state == AMD_CG_STATE_GATE ? true : false); return 0; + } static int vega10_ih_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index bd0580334f83..6b52a539d51b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "vega10_ip_offset.h" int vega10_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 587e33f5dcce..556f854e3551 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "vega20_ip_offset.h" int vega20_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 5f8c8786cac5..e4f4201b3c34 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) return -EINVAL; } +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } + + return pp_funcs->get_asic_baco_capability(pp_handle, cap); +} + +int smu7_asic_baco_reset(struct amdgpu_device *adev) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if 
(pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + + dev_info(adev->dev, "GPU BACO reset\n"); + + return 0; +} + /** - * vi_asic_reset - soft reset GPU + * vi_asic_pci_config_reset - soft reset GPU * * @adev: amdgpu_device pointer * - * Look up which blocks are hung and attempt - * to reset them. + * Use PCI Config method to reset the GPU. + * * Returns 0 for success. */ -static int vi_asic_reset(struct amdgpu_device *adev) +static int vi_asic_pci_config_reset(struct amdgpu_device *adev) { int r; @@ -711,10 +745,74 @@ static int vi_asic_reset(struct amdgpu_device *adev) return r; } +static bool vi_asic_supports_baco(struct amdgpu_device *adev) +{ + bool baco_support; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + smu7_asic_get_baco_capability(adev, &baco_support); + break; + default: + baco_support = false; + break; + } + + return baco_support; +} + static enum amd_reset_method vi_asic_reset_method(struct amdgpu_device *adev) { - return AMD_RESET_METHOD_LEGACY; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + smu7_asic_get_baco_capability(adev, &baco_reset); + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_LEGACY; +} + +/** + * vi_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. + */ +static int vi_asic_reset(struct amdgpu_device *adev) +{ + int r; + + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + if (!adev->in_suspend) + amdgpu_inc_vram_lost(adev); + r = smu7_asic_baco_reset(adev); + } else { + r = vi_asic_pci_config_reset(adev); + } + + return r; } static u32 vi_get_config_memsize(struct amdgpu_device *adev) @@ -1042,6 +1140,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .get_pcie_usage = &vi_get_pcie_usage, .need_reset_on_init = &vi_need_reset_on_init, .get_pcie_replay_count = &vi_get_pcie_replay_count, + .supports_baco = &vi_asic_supports_baco, }; #define CZ_REV_BRISTOL(rev) \ diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 8de0772f986c..40d4174913a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev, int vi_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); +int smu7_asic_baco_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index a1a35d4d594b..b3672d10ea54 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -1,11 +1,11 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT # # Heterogenous system architecture configuration # config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && (X86_64 || ARM64) + depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64) imply AMD_IOMMU_V2 if X86_64 select MMU_NOTIFIER help diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 
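Taken together, the vi.c additions above wire BACO (Bus Active, Chip Off) in as a preferred reset method: supports_baco/reset_method ask the powerplay backend whether the capability exists, and the reset itself is simply a round trip through the BACO state (enter = 1, exit = 0), losing VRAM contents on the way. Condensed:

	/* sketch of the new vi.c reset flow; names as introduced above */
	static int vi_reset_flow(struct amdgpu_device *adev)
	{
		if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
			if (!adev->in_suspend)
				amdgpu_inc_vram_lost(adev);	/* VRAM does not survive BACO */
			return smu7_asic_baco_reset(adev);	/* set_asic_baco_state(1), then (0) */
		}
		return vi_asic_pci_config_reset(adev);		/* legacy PCI config reset */
	}
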
48155060a57c..61474627a32c 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -38,11 +38,9 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_mqd_manager_v9.o \ $(AMDKFD_PATH)/kfd_mqd_manager_v10.o \ $(AMDKFD_PATH)/kfd_kernel_queue.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \ - $(AMDKFD_PATH)/kfd_kernel_queue_v10.o \ $(AMDKFD_PATH)/kfd_packet_manager.o \ + $(AMDKFD_PATH)/kfd_packet_manager_vi.o \ + $(AMDKFD_PATH)/kfd_packet_manager_v9.o \ $(AMDKFD_PATH)/kfd_process_queue_manager.o \ $(AMDKFD_PATH)/kfd_device_queue_manager.o \ $(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 177d1e5329a5..9f59ba93cfe0 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, const struct cik_ih_ring_entry *ihre = (const struct cik_ih_ring_entry *)ih_ring_entry; const struct kfd2kgd_calls *f2g = dev->kfd2kgd; - unsigned int vmid, pasid; + unsigned int vmid; + uint16_t pasid; + bool ret; /* This workaround is due to HW/FW limitation on Hawaii that * VMID and PASID are not written into ih_ring_entry @@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, *tmp_ihre = *ihre; vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd); - pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid); + ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid); tmp_ihre->ring_id &= 0x000000ff; tmp_ihre->ring_id |= vmid << 8; tmp_ihre->ring_id |= pasid << 16; - return (pasid != 0) && + return ret && (pasid != 0) && vmid >= dev->vm_info.first_vmid_kfd && vmid <= dev->vm_info.last_vmid_kfd; } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 901fe3590165..d3400da6ab64 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, 0xbf85fff8, - 0xbf820141, 0xbef4037e, + 0xbf820142, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304080, 0x725d0100, 0xe0304100, 0x725d0200, 0xe0304180, - 0x725d0300, 0xbf820031, + 0x725d0300, 0xbf820032, 0xbef603ff, 0x01000000, 0xbef20378, 0x8078ff78, 0x00000400, 0xbefc0384, @@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304100, 0x725d0100, 0xe0304200, 0x725d0200, 0xe0304300, - 0x725d0300, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0x80f8a078, - 0xf42d003a, 0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 
0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0xbef603ff, - 0x01000000, 0xf4211bfa, + 0x725d0300, 0xbf8c3f70, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, + 0xbef603ff, 0x01000000, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0xbef603ff, 0x01000000, + 0xf4211bfa, 0xf0000000, + 0x80788478, 0xf4211b3a, 0xf0000000, 0x80788478, - 0xf4211b3a, 0xf0000000, - 0x80788478, 0xf4211b7a, + 0xf4211b7a, 0xf0000000, + 0x80788478, 0xf4211eba, 0xf0000000, 0x80788478, - 0xf4211eba, 0xf0000000, - 0x80788478, 0xf4211efa, + 0xf4211efa, 0xf0000000, + 0x80788478, 0xf4211c3a, 0xf0000000, 0x80788478, - 0xf4211c3a, 0xf0000000, - 0x80788478, 0xf4211c7a, + 0xf4211c7a, 0xf0000000, + 0x80788478, 0xf4211e7a, 0xf0000000, 0x80788478, - 0xf4211e7a, 0xf0000000, - 0x80788478, 0xf4211cfa, + 0xf4211cfa, 0xf0000000, + 0x80788478, 0xf4211bba, 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef814, 0xf4211bba, - 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef815, - 0xbef2036d, 0x876dff72, - 0x0000ffff, 0xbefc036f, - 0xbefe037a, 0xbeff037b, - 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f71ff, 0xfffff800, - 0x906f8b6f, 0xb9efa2c3, - 0xb9f3f801, 0x876fff72, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbef30380, + 0xb9eef815, 0xbef2036d, + 0x876dff72, 0x0000ffff, + 0xbefc036f, 0xbefe037a, + 0xbeff037b, 0x876f71ff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f71ff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0x876fff72, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbef30380, 0x88736f73, + 0x876fff72, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x88736f73, 0x876fff72, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x88736f73, - 0x876fff72, 0x01000000, - 0x906f986f, 0x8f6f996f, - 0x88736f73, 0x876fff70, - 0x00800000, 0x906f976f, - 0xb9f3f807, 0x87fe7e7e, - 0x87ea6a6a, 0xb9f0f802, - 0xbf8a0000, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x88736f73, + 0x876fff70, 0x00800000, + 0x906f976f, 0xb9f3f807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f0f802, 0xbf8a0000, + 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index cdaa523ce6be..4433bda2ce25 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -758,6 +758,7 @@ L_RESTORE_V0: buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, 
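The large cwsr_trap_gfx10_hex[] delta above is mostly mechanical: the array is the assembled image of cwsr_trap_handler_gfx10.asm, so inserting one instruction re-flows every later word and bumps the two branch immediates (0xbf820141 -> 0xbf820142, 0xbf820031 -> 0xbf820032). The single substantive change is the new word 0xbf8c3f70 after the last 0x725d0300 load, which decodes (if I read the gfx10 encoding right) to the s_waitcnt vmcnt(0) added in the .asm hunk here, making the restore path wait for the VGPR loads to land before consuming them:

	/* gfx10 s_waitcnt = 0xBF8C0000 | simm16, with (approximately):
	 *   simm16[3:0]  vmcnt[3:0]     simm16[15:14] vmcnt[5:4]
	 *   simm16[6:4]  expcnt         simm16[13:8]  lgkmcnt
	 * 0x3f70 -> vmcnt(0), expcnt/lgkmcnt left at max (don't-care)
	 */
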
s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0) /* restore SGPRs */ //will be 2+8+16*6 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1d3cd5c50d5f..b6ba0697c531 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -49,7 +49,7 @@ static const char kfd_dev_name[] = "kfd"; static const struct file_operations kfd_fops = { .owner = THIS_MODULE, .unlocked_ioctl = kfd_ioctl, - .compat_ioctl = kfd_ioctl, + .compat_ioctl = compat_ptr_ioctl, .open = kfd_open, .mmap = kfd_mmap, }; @@ -258,6 +258,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, unsigned int queue_id; struct kfd_process_device *pdd; struct queue_properties q_properties; + uint32_t doorbell_offset_in_process = 0; memset(&q_properties, 0, sizeof(struct queue_properties)); @@ -282,11 +283,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, goto err_bind_process; } - pr_debug("Creating queue for PASID %d on gpu 0x%x\n", + pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n", p->pasid, dev->id); - err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id); + err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, + &doorbell_offset_in_process); if (err != 0) goto err_create_queue; @@ -296,14 +298,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Return gpu_id as doorbell offset for mmap usage */ args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); - args->doorbell_offset <<= PAGE_SHIFT; if (KFD_IS_SOC15(dev->device_info->asic_family)) - /* On SOC15 ASICs, doorbell allocation must be - * per-device, and independent from the per-process - * queue_id. Return the doorbell offset within the - * doorbell aperture to user mode. + /* On SOC15 ASICs, include the doorbell offset within the + * process doorbell frame, which is 2 pages. 
*/ - args->doorbell_offset |= q_properties.doorbell_off; + args->doorbell_offset |= doorbell_offset_in_process; mutex_unlock(&p->mutex); @@ -332,7 +331,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, int retval; struct kfd_ioctl_destroy_queue_args *args = data; - pr_debug("Destroying queue id %d for pasid %d\n", + pr_debug("Destroying queue id %d for pasid 0x%x\n", args->queue_id, p->pasid); @@ -378,7 +377,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, properties.queue_percent = args->queue_percentage; properties.priority = args->queue_priority; - pr_debug("Updating queue id %d for pasid %d\n", + pr_debug("Updating queue id %d for pasid 0x%x\n", args->queue_id, p->pasid); mutex_lock(&p->mutex); @@ -855,7 +854,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, struct kfd_process_device_apertures *pAperture; struct kfd_process_device *pdd; - dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid); args->num_of_nodes = 0; @@ -913,7 +912,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, uint32_t nodes = 0; int ret; - dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid); if (args->num_of_nodes == 0) { /* Return number of nodes, so that user space can alloacate @@ -1128,7 +1127,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, mutex_unlock(&p->mutex); if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && - pdd->qpd.vmid != 0) + pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) dev->kfd2kgd->set_scratch_backing_va( dev->kgd, args->va_addr, pdd->qpd.vmid); @@ -1312,10 +1311,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, /* MMIO is mapped through kfd device * Generate a kfd mmap offset */ - if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) { - args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id); - args->mmap_offset <<= PAGE_SHIFT; - } + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) + args->mmap_offset = KFD_MMAP_TYPE_MMIO + | KFD_MMAP_GPU_ID(args->gpu_id); return 0; @@ -1801,7 +1799,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) } else goto err_i1; - dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); + dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); process = kfd_get_process(current); if (IS_ERR(process)) { @@ -1856,7 +1854,8 @@ err_i1: kfree(kdata); if (retcode) - dev_dbg(kfd_device, "ret = %d\n", retcode); + dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n", + nr, arg, retcode); return retcode; } @@ -1877,7 +1876,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pr_debug("Process %d mapping mmio page\n" + pr_debug("pasid 0x%x mapping mmio page\n" " target user address == 0x%08llX\n" " physical address == 0x%08llX\n" " vm_flags == 0x%04lX\n" @@ -1898,20 +1897,19 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) { struct kfd_process *process; struct kfd_dev *dev = NULL; - unsigned long vm_pgoff; + unsigned long mmap_offset; unsigned int gpu_id; process = kfd_get_process(current); if (IS_ERR(process)) return PTR_ERR(process); - vm_pgoff = vma->vm_pgoff; - vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff); - gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff); + mmap_offset = vma->vm_pgoff << 
PAGE_SHIFT; + gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset); if (gpu_id) dev = kfd_device_by_id(gpu_id); - switch (vm_pgoff & KFD_MMAP_TYPE_MASK) { + switch (mmap_offset & KFD_MMAP_TYPE_MASK) { case KFD_MMAP_TYPE_DOORBELL: if (!dev) return -ENODEV; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 66387caf966e..de9f68d5c312 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { /* TODO - check & update Vega10 cache details */ #define vega10_cache_info carrizo_cache_info #define raven_cache_info carrizo_cache_info +#define renoir_cache_info carrizo_cache_info /* TODO - check & update Navi10 cache details */ #define navi10_cache_info carrizo_cache_info @@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = raven_cache_info; num_of_cache_types = ARRAY_SIZE(raven_cache_info); break; + case CHIP_RENOIR: + pcache_info = renoir_cache_info; + num_of_cache_types = ARRAY_SIZE(renoir_cache_info); + break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: pcache_info = navi10_cache_info; num_of_cache_types = ARRAY_SIZE(navi10_cache_info); break; @@ -703,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info, cu_info, mem_available, - cu_info->cu_bitmap[i][j], + cu_info->cu_bitmap[i % 4][j + i / 4], ct, cu_processor_id, k); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index a3441b0e385b..27bcc5b472f6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, * The receive packet buff will be sitting on the Indirect Buffer * and in the PQ we put the IB packet + sync packet(s). 
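The kfd_chardev.c changes above all serve one refactor: the mmap cookies handed to user space are now full byte offsets whose top bits carry the mapping type and GPU id, and the PAGE_SHIFT juggling happens exactly once, in kfd_mmap(). Encode and decode side by side (mask helpers per kfd_priv.h):

	/* ioctl side: build the cookie as a byte offset */
	args->doorbell_offset  = KFD_MMAP_TYPE_DOORBELL;
	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	if (KFD_IS_SOC15(dev->device_info->asic_family))
		args->doorbell_offset |= doorbell_offset_in_process;

	/* mmap side: vm_pgoff is the same cookie, page-shifted by the VFS */
	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
	case KFD_MMAP_TYPE_DOORBELL:
		/* ... */
	}
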
*/ - status = kq->ops.acquire_packet_buffer(kq, + status = kq_acquire_packet_buffer(kq, pq_packets_size_in_bytes / sizeof(uint32_t), &ib_packet_buff); if (status) { - pr_err("acquire_packet_buffer failed\n"); + pr_err("kq_acquire_packet_buffer failed\n"); return status; } @@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, if (status) { pr_err("Failed to allocate GART memory\n"); - kq->ops.rollback_packet(kq); + kq_rollback_packet(kq); return status; } @@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, rm_packet->data_lo = QUEUESTATE__ACTIVE; - kq->ops.submit_packet(kq); + kq_submit_packet(kq); /* Wait till CP writes sync code: */ status = amdkfd_fence_wait_timeout( @@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev) properties.type = KFD_QUEUE_TYPE_DIQ; status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL, - &properties, &qid); + &properties, &qid, NULL); if (status) { pr_err("Failed to create DIQ\n"); @@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; + uint16_t queried_pasid; union SQ_CMD_BITS reg_sq_cmd; union GRBM_GFX_INDEX_BITS reg_gfx_index; struct kfd_process_device *pdd; @@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) */ for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { - if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid - (dev->kgd, vmid)) { - if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid - (dev->kgd, vmid) == p->pasid) { - pr_debug("Killing wave fronts of vmid %d and pasid %d\n", - vmid, p->pasid); - break; - } + status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info + (dev->kgd, vmid, &queried_pasid); + + if (status && queried_pasid == p->pasid) { + pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", + vmid, p->pasid); + break; } } if (vmid > last_vmid_to_scan) { - pr_err("Didn't find vmid for pasid %d\n", p->pasid); + pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid); return -EFAULT; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c index 9d4af961c5d1..9bfa50633654 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c @@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p) { if (pmgr->pasid != 0) { - pr_debug("H/W debugger is already active using pasid %d\n", + pr_debug("H/W debugger is already active using pasid 0x%x\n", pmgr->pasid); return -EBUSY; } @@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p) { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != p->pasid) { - pr_debug("H/W debugger is not registered by calling pasid %d\n", + pr_debug("H/W debugger is not registered by calling pasid 0x%x\n", p->pasid); return -EINVAL; } @@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != wac_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid %d\n", + pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", wac_info->process->pasid); return -EINVAL; } @@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr, { /* Is the requests coming from the already registered process? 
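The dbgdev change above is the same interface consolidation applied to cik_event_interrupt.c earlier in this diff: the separate "is this vmid mapping valid" and "which pasid is mapped" ATC queries are merged into one get_atc_vmid_pasid_mapping_info() call, which returns the valid bit and fills in the pasid, typically from a single read of the mapping register. The vmid scan therefore collapses to:

	uint16_t queried_pasid;
	bool valid;

	for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
		valid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info(dev->kgd,
								      vmid,
								      &queried_pasid);
		if (valid && queried_pasid == p->pasid)
			break;	/* found the vmid this process is using */
	}
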
*/ if (pmgr->pasid != adw_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid %d\n", + pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", adw_info->process->pasid); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0dc1084b5e82..209bfc849352 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -39,6 +39,41 @@ */ static atomic_t kfd_locked = ATOMIC_INIT(0); +#ifdef CONFIG_DRM_AMDGPU_CIK +extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; +#endif +extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; +extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; +extern const struct kfd2kgd_calls arcturus_kfd2kgd; +extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; + +static const struct kfd2kgd_calls *kfd2kgd_funcs[] = { +#ifdef KFD_SUPPORT_IOMMU_V2 +#ifdef CONFIG_DRM_AMDGPU_CIK + [CHIP_KAVERI] = &gfx_v7_kfd2kgd, +#endif + [CHIP_CARRIZO] = &gfx_v8_kfd2kgd, + [CHIP_RAVEN] = &gfx_v9_kfd2kgd, +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + [CHIP_HAWAII] = &gfx_v7_kfd2kgd, +#endif + [CHIP_TONGA] = &gfx_v8_kfd2kgd, + [CHIP_FIJI] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS10] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS11] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS12] = &gfx_v8_kfd2kgd, + [CHIP_VEGAM] = &gfx_v8_kfd2kgd, + [CHIP_VEGA10] = &gfx_v9_kfd2kgd, + [CHIP_VEGA12] = &gfx_v9_kfd2kgd, + [CHIP_VEGA20] = &gfx_v9_kfd2kgd, + [CHIP_RENOIR] = &gfx_v9_kfd2kgd, + [CHIP_ARCTURUS] = &arcturus_kfd2kgd, + [CHIP_NAVI10] = &gfx_v10_kfd2kgd, + [CHIP_NAVI12] = &gfx_v10_kfd2kgd, + [CHIP_NAVI14] = &gfx_v10_kfd2kgd, +}; + #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { .asic_family = CHIP_KAVERI, @@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = { .num_sdma_queues_per_engine = 8, }; +static const struct kfd_device_info renoir_device_info = { + .asic_family = CHIP_RENOIR, + .asic_name = "renoir", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, + .num_sdma_engines = 1, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 2, +}; + static const struct kfd_device_info navi10_device_info = { .asic_family = CHIP_NAVI10, .asic_name = "navi10", @@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = { .num_sdma_queues_per_engine = 8, }; -struct kfd_deviceid { - unsigned short did; - const struct kfd_device_info *device_info; +static const struct kfd_device_info navi12_device_info = { + .asic_family = CHIP_NAVI12, + .asic_name = "navi12", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .needs_iommu_device = false, + .supports_cwsr = true, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 8, }; -static const struct kfd_deviceid supported_devices[] = { +static const struct kfd_device_info navi14_device_info = { + .asic_family = CHIP_NAVI14, + .asic_name = "navi14", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * 
sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .needs_iommu_device = false, + .supports_cwsr = true, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 8, +}; + +/* For each entry, [0] is regular and [1] is virtualisation device. */ +static const struct kfd_device_info *kfd_supported_devices[][2] = { #ifdef KFD_SUPPORT_IOMMU_V2 - { 0x1304, &kaveri_device_info }, /* Kaveri */ - { 0x1305, &kaveri_device_info }, /* Kaveri */ - { 0x1306, &kaveri_device_info }, /* Kaveri */ - { 0x1307, &kaveri_device_info }, /* Kaveri */ - { 0x1309, &kaveri_device_info }, /* Kaveri */ - { 0x130A, &kaveri_device_info }, /* Kaveri */ - { 0x130B, &kaveri_device_info }, /* Kaveri */ - { 0x130C, &kaveri_device_info }, /* Kaveri */ - { 0x130D, &kaveri_device_info }, /* Kaveri */ - { 0x130E, &kaveri_device_info }, /* Kaveri */ - { 0x130F, &kaveri_device_info }, /* Kaveri */ - { 0x1310, &kaveri_device_info }, /* Kaveri */ - { 0x1311, &kaveri_device_info }, /* Kaveri */ - { 0x1312, &kaveri_device_info }, /* Kaveri */ - { 0x1313, &kaveri_device_info }, /* Kaveri */ - { 0x1315, &kaveri_device_info }, /* Kaveri */ - { 0x1316, &kaveri_device_info }, /* Kaveri */ - { 0x1317, &kaveri_device_info }, /* Kaveri */ - { 0x1318, &kaveri_device_info }, /* Kaveri */ - { 0x131B, &kaveri_device_info }, /* Kaveri */ - { 0x131C, &kaveri_device_info }, /* Kaveri */ - { 0x131D, &kaveri_device_info }, /* Kaveri */ - { 0x9870, &carrizo_device_info }, /* Carrizo */ - { 0x9874, &carrizo_device_info }, /* Carrizo */ - { 0x9875, &carrizo_device_info }, /* Carrizo */ - { 0x9876, &carrizo_device_info }, /* Carrizo */ - { 0x9877, &carrizo_device_info }, /* Carrizo */ - { 0x15DD, &raven_device_info }, /* Raven */ - { 0x15D8, &raven_device_info }, /* Raven */ + [CHIP_KAVERI] = {&kaveri_device_info, NULL}, + [CHIP_CARRIZO] = {&carrizo_device_info, NULL}, + [CHIP_RAVEN] = {&raven_device_info, NULL}, #endif - { 0x67A0, &hawaii_device_info }, /* Hawaii */ - { 0x67A1, &hawaii_device_info }, /* Hawaii */ - { 0x67A2, &hawaii_device_info }, /* Hawaii */ - { 0x67A8, &hawaii_device_info }, /* Hawaii */ - { 0x67A9, &hawaii_device_info }, /* Hawaii */ - { 0x67AA, &hawaii_device_info }, /* Hawaii */ - { 0x67B0, &hawaii_device_info }, /* Hawaii */ - { 0x67B1, &hawaii_device_info }, /* Hawaii */ - { 0x67B8, &hawaii_device_info }, /* Hawaii */ - { 0x67B9, &hawaii_device_info }, /* Hawaii */ - { 0x67BA, &hawaii_device_info }, /* Hawaii */ - { 0x67BE, &hawaii_device_info }, /* Hawaii */ - { 0x6920, &tonga_device_info }, /* Tonga */ - { 0x6921, &tonga_device_info }, /* Tonga */ - { 0x6928, &tonga_device_info }, /* Tonga */ - { 0x6929, &tonga_device_info }, /* Tonga */ - { 0x692B, &tonga_device_info }, /* Tonga */ - { 0x6938, &tonga_device_info }, /* Tonga */ - { 0x6939, &tonga_device_info }, /* Tonga */ - { 0x7300, &fiji_device_info }, /* Fiji */ - { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/ - { 0x67C0, &polaris10_device_info }, /* Polaris10 */ - { 0x67C1, &polaris10_device_info }, /* Polaris10 */ - { 0x67C2, &polaris10_device_info }, /* Polaris10 */ - { 0x67C4, &polaris10_device_info }, /* Polaris10 */ - { 0x67C7, &polaris10_device_info }, /* Polaris10 */ - { 0x67C8, &polaris10_device_info }, /* Polaris10 */ - { 0x67C9, &polaris10_device_info }, /* Polaris10 */ - { 0x67CA, &polaris10_device_info }, /* Polaris10 */ - { 0x67CC, &polaris10_device_info }, /* Polaris10 */ - { 0x67CF, &polaris10_device_info }, /* 
Polaris10 */ - { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/ - { 0x67DF, &polaris10_device_info }, /* Polaris10 */ - { 0x6FDF, &polaris10_device_info }, /* Polaris10 */ - { 0x67E0, &polaris11_device_info }, /* Polaris11 */ - { 0x67E1, &polaris11_device_info }, /* Polaris11 */ - { 0x67E3, &polaris11_device_info }, /* Polaris11 */ - { 0x67E7, &polaris11_device_info }, /* Polaris11 */ - { 0x67E8, &polaris11_device_info }, /* Polaris11 */ - { 0x67E9, &polaris11_device_info }, /* Polaris11 */ - { 0x67EB, &polaris11_device_info }, /* Polaris11 */ - { 0x67EF, &polaris11_device_info }, /* Polaris11 */ - { 0x67FF, &polaris11_device_info }, /* Polaris11 */ - { 0x6980, &polaris12_device_info }, /* Polaris12 */ - { 0x6981, &polaris12_device_info }, /* Polaris12 */ - { 0x6985, &polaris12_device_info }, /* Polaris12 */ - { 0x6986, &polaris12_device_info }, /* Polaris12 */ - { 0x6987, &polaris12_device_info }, /* Polaris12 */ - { 0x6995, &polaris12_device_info }, /* Polaris12 */ - { 0x6997, &polaris12_device_info }, /* Polaris12 */ - { 0x699F, &polaris12_device_info }, /* Polaris12 */ - { 0x694C, &vegam_device_info }, /* VegaM */ - { 0x694E, &vegam_device_info }, /* VegaM */ - { 0x694F, &vegam_device_info }, /* VegaM */ - { 0x6860, &vega10_device_info }, /* Vega10 */ - { 0x6861, &vega10_device_info }, /* Vega10 */ - { 0x6862, &vega10_device_info }, /* Vega10 */ - { 0x6863, &vega10_device_info }, /* Vega10 */ - { 0x6864, &vega10_device_info }, /* Vega10 */ - { 0x6867, &vega10_device_info }, /* Vega10 */ - { 0x6868, &vega10_device_info }, /* Vega10 */ - { 0x6869, &vega10_device_info }, /* Vega10 */ - { 0x686A, &vega10_device_info }, /* Vega10 */ - { 0x686B, &vega10_device_info }, /* Vega10 */ - { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ - { 0x686D, &vega10_device_info }, /* Vega10 */ - { 0x686E, &vega10_device_info }, /* Vega10 */ - { 0x686F, &vega10_device_info }, /* Vega10 */ - { 0x687F, &vega10_device_info }, /* Vega10 */ - { 0x69A0, &vega12_device_info }, /* Vega12 */ - { 0x69A1, &vega12_device_info }, /* Vega12 */ - { 0x69A2, &vega12_device_info }, /* Vega12 */ - { 0x69A3, &vega12_device_info }, /* Vega12 */ - { 0x69AF, &vega12_device_info }, /* Vega12 */ - { 0x66a0, &vega20_device_info }, /* Vega20 */ - { 0x66a1, &vega20_device_info }, /* Vega20 */ - { 0x66a2, &vega20_device_info }, /* Vega20 */ - { 0x66a3, &vega20_device_info }, /* Vega20 */ - { 0x66a4, &vega20_device_info }, /* Vega20 */ - { 0x66a7, &vega20_device_info }, /* Vega20 */ - { 0x66af, &vega20_device_info }, /* Vega20 */ - { 0x738C, &arcturus_device_info }, /* Arcturus */ - { 0x7388, &arcturus_device_info }, /* Arcturus */ - { 0x738E, &arcturus_device_info }, /* Arcturus */ - { 0x7390, &arcturus_device_info }, /* Arcturus vf */ - { 0x7310, &navi10_device_info }, /* Navi10 */ - { 0x7312, &navi10_device_info }, /* Navi10 */ - { 0x7318, &navi10_device_info }, /* Navi10 */ - { 0x731a, &navi10_device_info }, /* Navi10 */ - { 0x731f, &navi10_device_info }, /* Navi10 */ + [CHIP_HAWAII] = {&hawaii_device_info, NULL}, + [CHIP_TONGA] = {&tonga_device_info, NULL}, + [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info}, + [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info}, + [CHIP_POLARIS11] = {&polaris11_device_info, NULL}, + [CHIP_POLARIS12] = {&polaris12_device_info, NULL}, + [CHIP_VEGAM] = {&vegam_device_info, NULL}, + [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info}, + [CHIP_VEGA12] = {&vega12_device_info, NULL}, + [CHIP_VEGA20] = {&vega20_device_info, NULL}, + [CHIP_RENOIR] = 
{&renoir_device_info, NULL}, + [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info}, + [CHIP_NAVI10] = {&navi10_device_info, NULL}, + [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info}, + [CHIP_NAVI14] = {&navi14_device_info, NULL}, }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, @@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); -static const struct kfd_device_info *lookup_device_info(unsigned short did) +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, + struct pci_dev *pdev, unsigned int asic_type, bool vf) { - size_t i; + struct kfd_dev *kfd; + const struct kfd_device_info *device_info; + const struct kfd2kgd_calls *f2g; - for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { - if (supported_devices[i].did == did) { - WARN_ON(!supported_devices[i].device_info); - return supported_devices[i].device_info; - } + if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2) + || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) { + dev_err(kfd_device, "asic_type %d out of range\n", asic_type); + return NULL; /* asic_type out of range */ } - dev_warn(kfd_device, "DID %04x is missing in supported_devices\n", - did); - - return NULL; -} - -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, - struct pci_dev *pdev, const struct kfd2kgd_calls *f2g) -{ - struct kfd_dev *kfd; - const struct kfd_device_info *device_info = - lookup_device_info(pdev->device); + device_info = kfd_supported_devices[asic_type][vf]; + f2g = kfd2kgd_funcs[asic_type]; - if (!device_info) { - dev_err(kfd_device, "kgd2kfd_probe failed\n"); + if (!device_info || !f2g) { + dev_err(kfd_device, "%s %s not supported in kfd\n", + amdgpu_asic_name[asic_type], vf ? "VF" : ""); return NULL; } @@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) } bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) { unsigned int size; + kfd->ddev = ddev; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_MEC1); kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, @@ -751,9 +730,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) return 0; kgd2kfd_suspend(kfd); - /* hold dqm->lock to prevent further execution*/ - dqm_lock(kfd->dqm); - kfd_signal_reset_event(kfd); return 0; } @@ -766,17 +742,15 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { - int ret, count; + int ret; if (!kfd->init_complete) return 0; - dqm_unlock(kfd->dqm); - ret = kfd_resume(kfd); if (ret) return ret; - count = atomic_dec_return(&kfd_locked); + atomic_dec(&kfd_locked); atomic_set(&kfd->sram_ecc_flag, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d985e31fcc1e..f7f6df40875e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) } q->properties.doorbell_off = - kfd_doorbell_id_to_offset(dev, q->process, + kfd_get_doorbell_dw_offset_in_bar(dev, q->process, q->doorbell_id); return 0; @@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { - int bit, allocated_vmid; + int allocated_vmid = -1, i; - if (dqm->vmid_bitmap == 0) - return -ENOMEM; + for (i = 
dqm->dev->vm_info.first_vmid_kfd; + i <= dqm->dev->vm_info.last_vmid_kfd; i++) { + if (!dqm->vmid_pasid[i]) { + allocated_vmid = i; + break; + } + } + + if (allocated_vmid < 0) { + pr_err("no more vmid to allocate\n"); + return -ENOSPC; + } + + pr_debug("vmid allocated: %d\n", allocated_vmid); + + dqm->vmid_pasid[allocated_vmid] = q->process->pasid; - bit = ffs(dqm->vmid_bitmap) - 1; - dqm->vmid_bitmap &= ~(1 << bit); + set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid); - allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd; - pr_debug("vmid allocation %d\n", allocated_vmid); qpd->vmid = allocated_vmid; q->properties.vmid = allocated_vmid; - set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); program_sh_mem_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm, /* invalidate the VM context after pasid and vmid mapping is set up */ kfd_flush_tlb(qpd_to_pdd(qpd)); - dqm->dev->kfd2kgd->set_scratch_backing_va( - dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid); + if (dqm->dev->kfd2kgd->set_scratch_backing_va) + dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd, + qpd->sh_hidden_private_base, qpd->vmid); return 0; } @@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { - int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; - /* On GFX v7, CP doesn't flush TC at dequeue */ if (q->device->device_info->asic_family == CHIP_HAWAII) if (flush_texture_cache_nocpsch(q->device, qpd)) @@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm, /* Release the vmid mapping */ set_pasid_vmid_mapping(dqm, 0, qpd->vmid); + dqm->vmid_pasid[qpd->vmid] = 0; - dqm->vmid_bitmap |= (1 << bit); qpd->vmid = 0; q->properties.vmid = 0; } @@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (q->properties.is_active) { + if (!dqm->sched_running) { + WARN_ONCE(1, "Load non-HWS mqd while stopped\n"); + goto add_queue_to_list; + } if (WARN(q->process->mm != current->mm, "should only run in user thread")) @@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, goto out_free_mqd; } +add_queue_to_list: list_add(&q->list, &qpd->queues_list); qpd->queue_count++; if (q->properties.is_active) @@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, deallocate_doorbell(qpd, q); + if (!dqm->sched_running) { + WARN_ONCE(1, "Destroy non-HWS queue while stopped\n"); + return 0; + } + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_RESET, KFD_UNMAP_LATENCY_MS, @@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || q->properties.type == KFD_QUEUE_TYPE_SDMA || q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { + + if (!dqm->sched_running) { + WARN_ONCE(1, "Update non-HWS queue while stopped\n"); + goto out_unlock; + } + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); - pr_info_ratelimited("Evicting PASID %u queues\n", + pr_info_ratelimited("Evicting PASID 0x%x queues\n", 
pdd->process->pasid); /* Mark all queues as evicted. Deactivate all active queues on @@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = false; + dqm->queue_count--; + + if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) + continue; + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, * maintain a consistent eviction state */ ret = retval; - dqm->queue_count--; } out: @@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); - pr_info_ratelimited("Evicting PASID %u queues\n", + pr_info_ratelimited("Evicting PASID 0x%x queues\n", pdd->process->pasid); /* Mark all queues as evicted. Deactivate all active queues on @@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, goto out; } - pr_info_ratelimited("Restoring PASID %u queues\n", + pr_info_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ @@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = true; + dqm->queue_count++; + + if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) + continue; + retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, &q->properties, mm); if (retval && !ret) @@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, * maintain a consistent eviction state */ ret = retval; - dqm->queue_count++; } qpd->evicted = 0; out: @@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, goto out; } - pr_info_ratelimited("Restoring PASID %u queues\n", + pr_info_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ @@ -879,7 +912,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) dqm->allocated_queues[pipe] |= 1 << queue; } - dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1; + memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid)); + dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); @@ -902,12 +936,20 @@ static void uninitialize(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm) { init_interrupts(dqm); - return pm_init(&dqm->packets, dqm); + + if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + return pm_init(&dqm->packets, dqm); + dqm->sched_running = true; + + return 0; } static int stop_nocpsch(struct device_queue_manager *dqm) { - pm_uninit(&dqm->packets); + if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + pm_uninit(&dqm->packets); + dqm->sched_running = false; + return 0; } @@ -1058,6 +1100,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); /* clear hang status when driver try to start the hw scheduler */ dqm->is_hws_hang = false; + dqm->sched_running = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1073,6 +1116,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) { dqm_lock(dqm); unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + 
dqm->sched_running = false; dqm_unlock(dqm); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); @@ -1259,9 +1303,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) { int retval; + if (!dqm->sched_running) + return 0; if (dqm->queue_count <= 0 || dqm->processes_count <= 0) return 0; - if (dqm->active_runlist) return 0; @@ -1283,6 +1328,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, { int retval = 0; + if (!dqm->sched_running) + return 0; if (dqm->is_hws_hang) return -EIO; if (!dqm->active_runlist) @@ -1548,7 +1595,7 @@ static int get_wave_state(struct device_queue_manager *dqm, goto dqm_unlock; } - mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE]; + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; if (!mqd_mgr->get_wave_state) { r = -EINVAL; @@ -1676,7 +1723,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_dev *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * - dev->device_info->num_sdma_engines * + (dev->device_info->num_sdma_engines + + dev->device_info->num_xgmi_sdma_engines) * dev->device_info->num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; @@ -1786,10 +1834,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: device_queue_manager_init_v9(&dqm->asic_ops); break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: device_queue_manager_init_v10_navi10(&dqm->asic_ops); break; default: @@ -1883,6 +1934,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) int pipe, queue; int r = 0; + if (!dqm->sched_running) { + seq_printf(m, " Device is stopped\n"); + + return 0; + } + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); @@ -1917,7 +1974,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } } - for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) { + for (pipe = 0; pipe < get_num_sdma_engines(dqm) + + get_num_xgmi_sdma_engines(dqm); pipe++) { for (queue = 0; queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 90db2c9275f6..a8c37e6da027 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -32,6 +32,8 @@ #include "kfd_mqd_manager.h" +#define VMID_NUM 16 + struct device_process_node { struct qcm_process_device *qpd; struct list_head list; @@ -185,7 +187,8 @@ struct device_queue_manager { unsigned int *allocated_queues; uint64_t sdma_bitmap; uint64_t xgmi_sdma_bitmap; - unsigned int vmid_bitmap; + /* the pasid mapping for each kfd vmid */ + uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; struct kfd_mem_obj *pipeline_mem; uint64_t fence_gpu_addr; @@ -198,6 +201,7 @@ struct device_queue_manager { bool is_hws_hang; struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; + bool sched_running; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index ebe79bf00145..8e0c00b9555e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd) kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address + 
doorbell_start_offset; - kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32); + kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32); kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, kfd_doorbell_process_slice(kfd)); @@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd) pr_debug("doorbell base == 0x%08lX\n", (uintptr_t)kfd->doorbell_base); - pr_debug("doorbell_id_offset == 0x%08lX\n", - kfd->doorbell_id_offset); + pr_debug("doorbell_base_dw_offset == 0x%08lX\n", + kfd->doorbell_base_dw_offset); pr_debug("doorbell_process_limit == 0x%08lX\n", doorbell_process_limit); @@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, * Calculating the kernel doorbell offset using the first * doorbell page. */ - *doorbell_off = kfd->doorbell_id_offset + inx; + *doorbell_off = kfd->doorbell_base_dw_offset + inx; pr_debug("Get kernel queue doorbell\n" " doorbell offset == 0x%08X\n" @@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value) } } -unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, +unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, struct kfd_process *process, unsigned int doorbell_id) { /* - * doorbell_id_offset accounts for doorbells taken by KGD. + * doorbell_base_dw_offset accounts for doorbells taken by KGD. * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to * the process's doorbells. The offset returned is in dword * units regardless of the ASIC-dependent doorbell size. */ - return kfd->doorbell_id_offset + + return kfd->doorbell_base_dw_offset + process->doorbell_index * kfd_doorbell_process_slice(kfd) / sizeof(u32) + doorbell_id * kfd->device_info->doorbell_size / sizeof(u32); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index d674d4b3340f..1f8365575b12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -346,7 +346,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, ret = create_signal_event(devkfd, p, ev); if (!ret) { *event_page_offset = KFD_MMAP_TYPE_EVENTS; - *event_page_offset <<= PAGE_SHIFT; *event_slot_index = ev->event_id; } break; @@ -852,8 +851,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, if (type == KFD_EVENT_TYPE_MEMORY) { dev_warn(kfd_device, - "Sending SIGSEGV to HSA Process with PID %d ", - p->lead_thread->pid); + "Sending SIGSEGV to process %d (pasid 0x%x)", + p->lead_thread->pid, p->pasid); send_sig(SIGSEGV, p->lead_thread, 0); } @@ -861,13 +860,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, if (send_signal) { if (send_sigterm) { dev_warn(kfd_device, - "Sending SIGTERM to HSA Process with PID %d ", - p->lead_thread->pid); + "Sending SIGTERM to process %d (pasid 0x%x)", + p->lead_thread->pid, p->pasid); send_sig(SIGTERM, p->lead_thread, 0); } else { dev_err(kfd_device, - "HSA Process (PID %d) got unhandled exception", - p->lead_thread->pid); + "Process %d (pasid 0x%x) got unhandled exception", + p->lead_thread->pid, p->pasid); } } } @@ -936,7 +935,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, /* Workaround on Raven to not kill the process when memory is freed * before IOMMU is able to finish processing all the excessive PPRs */ - if (dev->device_info->asic_family != CHIP_RAVEN) { + if (dev->device_info->asic_family != CHIP_RAVEN && + dev->device_info->asic_family != CHIP_RENOIR) { mutex_lock(&p->event_mutex); /* Lookup events by type and signal them */ 
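For reference, the dword-offset arithmetic performed by the renamed kfd_get_doorbell_dw_offset_in_bar() in the kfd_doorbell.c hunk above can be sketched as a small standalone C program. The struct and field names below are simplified stand-ins for the real kfd_dev/kfd_process members, not the kernel's definitions; only the arithmetic mirrors the helper, which (per its own comment) returns an offset in dword units regardless of the ASIC-dependent doorbell size:

/* Minimal sketch of the doorbell dword-offset computation.
 * fake_kfd and its fields are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_kfd {
	unsigned int doorbell_base_dw_offset; /* doorbells reserved by KGD */
	unsigned int process_slice_bytes;     /* per-process doorbell slice */
	unsigned int doorbell_size;           /* 4 bytes pre-GFX9, 8 on GFX9+ */
};

static unsigned int doorbell_dw_offset(const struct fake_kfd *kfd,
				       unsigned int process_index,
				       unsigned int doorbell_id)
{
	/* Skip the KGD-owned doorbells, then the earlier process slices,
	 * then index into this process's slice by doorbell id.
	 */
	return kfd->doorbell_base_dw_offset +
	       process_index * kfd->process_slice_bytes / sizeof(uint32_t) +
	       doorbell_id * kfd->doorbell_size / sizeof(uint32_t);
}

int main(void)
{
	struct fake_kfd kfd = { 512, 4096, 8 };

	/* Third doorbell of the second process slice */
	printf("dw offset = 0x%x\n", doorbell_dw_offset(&kfd, 1, 2));
	return 0;
}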
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 9dc4bff8085e..bb77b8890e77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process) /*Iterating over all devices*/ while (kfd_topology_enum_kfd_devices(id, &dev) == 0) { - if (!dev) { - id++; /* Skip non GPU devices */ + if (!dev || kfd_devcgroup_check_permission(dev)) { + /* Skip non GPU devices and devices to which the + * current process have no access to. Access can be + * limited by placing the process in a specific + * cgroup hierarchy + */ + id++; continue; } @@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: kfd_init_apertures_v9(pdd, id); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 3ef67d2e0d9f..e05d75ecda21 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, memcpy(patched_ihre, ih_ring_entry, dev->device_info->ih_ring_entry_size); - pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid( - dev->kgd, vmid); + pasid = dev->dqm->vmid_pasid[vmid]; /* Patch the pasid field */ patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3]) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index c56ac47cd318..bc47f6a44456 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd) } kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!kfd->ih_wq)) { + kfifo_free(&kfd->ih_fifo); + dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n"); + return -ENOMEM; + } spin_lock_init(&kfd->interrupt_lock); INIT_WORK(&kfd->interrupt_work, interrupt_wq); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 5f35df23fb18..8d871514671e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -62,9 +62,6 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) struct amd_iommu_device_info iommu_info; unsigned int pasid_limit; int err; - struct kfd_topology_device *top_dev; - - top_dev = kfd_topology_device_by_id(kfd->id); if (!kfd->device_info->needs_iommu_device) return 0; @@ -160,7 +157,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) if (!p) return; - pr_debug("Unbinding process %d from IOMMU\n", pasid); + pr_debug("Unbinding process 0x%x from IOMMU\n", pasid); mutex_lock(kfd_get_dbgmgr_mutex()); @@ -194,7 +191,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, struct kfd_dev *dev; dev_warn_ratelimited(kfd_device, - "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X", + "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", PCI_BUS_NUM(pdev->devfn), PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), @@ -235,7 +232,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) err = amd_iommu_bind_pasid(kfd->pdev, p->pasid, p->lead_thread); if (err < 0) { - pr_err("Unexpected pasid %d binding failure\n", + pr_err("Unexpected pasid 0x%x binding 
failure\n", p->pasid); mutex_unlock(&p->mutex); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 8b4564f71a7a..2d56dc534459 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -34,7 +34,10 @@ #define PM4_COUNT_ZERO (((1 << 15) - 1) << 16) -static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, +/* Initialize a kernel queue, including allocations of GART memory + * needed for the queue. + */ +static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size) { struct queue_properties prop; @@ -87,9 +90,17 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_gpu_addr = kq->pq->gpu_addr; - retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size); - if (!retval) - goto err_eop_allocate_vidmem; + /* For CIK family asics, kq->eop_mem is not needed */ + if (dev->device_info->asic_family > CHIP_MULLINS) { + retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); + if (retval != 0) + goto err_eop_allocate_vidmem; + + kq->eop_gpu_addr = kq->eop_mem->gpu_addr; + kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; + + memset(kq->eop_kernel_addr, 0, PAGE_SIZE); + } retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel), &kq->rptr_mem); @@ -183,7 +194,8 @@ err_get_kernel_doorbell: } -static void uninitialize(struct kernel_queue *kq) +/* Uninitialize a kernel queue and free all its memory usages. */ +static void kq_uninitialize(struct kernel_queue *kq) { if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) kq->mqd_mgr->destroy_mqd(kq->mqd_mgr, @@ -200,14 +212,19 @@ static void uninitialize(struct kernel_queue *kq) kfd_gtt_sa_free(kq->dev, kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem); - kq->ops_asic_specific.uninitialize(kq); + + /* For CIK family asics, kq->eop_mem is Null, kfd_gtt_sa_free() + * is able to handle NULL properly. 
+ */ + kfd_gtt_sa_free(kq->dev, kq->eop_mem); + kfd_gtt_sa_free(kq->dev, kq->pq); kfd_release_kernel_doorbell(kq->dev, kq->queue->properties.doorbell_ptr); uninit_queue(kq->queue); } -static int acquire_packet_buffer(struct kernel_queue *kq, +int kq_acquire_packet_buffer(struct kernel_queue *kq, size_t packet_size_in_dwords, unsigned int **buffer_ptr) { size_t available_size; @@ -268,7 +285,7 @@ err_no_space: return -ENOMEM; } -static void submit_packet(struct kernel_queue *kq) +void kq_submit_packet(struct kernel_queue *kq) { #ifdef DEBUG int i; @@ -280,11 +297,18 @@ static void submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - - kq->ops_asic_specific.submit_packet(kq); + if (kq->dev->device_info->doorbell_size == 8) { + *kq->wptr64_kernel = kq->pending_wptr64; + write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, + kq->pending_wptr64); + } else { + *kq->wptr_kernel = kq->pending_wptr; + write_kernel_doorbell(kq->queue->properties.doorbell_ptr, + kq->pending_wptr); + } } -static void rollback_packet(struct kernel_queue *kq) +void kq_rollback_packet(struct kernel_queue *kq) { if (kq->dev->device_info->doorbell_size == 8) { kq->pending_wptr64 = *kq->wptr64_kernel; @@ -304,57 +328,18 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, if (!kq) return NULL; - kq->ops.initialize = initialize; - kq->ops.uninitialize = uninitialize; - kq->ops.acquire_packet_buffer = acquire_packet_buffer; - kq->ops.submit_packet = submit_packet; - kq->ops.rollback_packet = rollback_packet; - - switch (dev->device_info->asic_family) { - case CHIP_CARRIZO: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - kernel_queue_init_vi(&kq->ops_asic_specific); - break; - - case CHIP_KAVERI: - case CHIP_HAWAII: - kernel_queue_init_cik(&kq->ops_asic_specific); - break; - - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - kernel_queue_init_v9(&kq->ops_asic_specific); - break; - case CHIP_NAVI10: - kernel_queue_init_v10(&kq->ops_asic_specific); - break; - default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - goto out_free; - } - - if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) + if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) return kq; pr_err("Failed to init kernel queue\n"); -out_free: kfree(kq); return NULL; } void kernel_queue_uninit(struct kernel_queue *kq) { - kq->ops.uninitialize(kq); + kq_uninitialize(kq); kfree(kq); } @@ -374,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) return; } - retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); + retval = kq_acquire_packet_buffer(kq, 5, &buffer); if (unlikely(retval != 0)) { pr_err(" Failed to acquire packet buffer\n"); pr_err("Kernel queue test failed\n"); @@ -382,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) } for (i = 0; i < 5; i++) buffer[i] = kq->nop_packet; - kq->ops.submit_packet(kq); + kq_submit_packet(kq); pr_err("Ending kernel queue test\n"); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 365fc674fea4..f4cfe9f1871c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -29,45 +29,28 @@ #include "kfd_priv.h" /** - * struct kernel_queue_ops - * - * @initialize: Initialize a kernel queue, including allocations of GART memory - * needed for the queue. 
- * - * @uninitialize: Uninitialize a kernel queue and free all its memory usages. - * - * @acquire_packet_buffer: Returns a pointer to the location in the kernel + * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel * queue ring buffer where the calling function can write its packet. It is * Guaranteed that there is enough space for that packet. It also updates the * pending write pointer to that location so subsequent calls to * acquire_packet_buffer will get a correct write pointer * - * @submit_packet: Update the write pointer and doorbell of a kernel queue. - * - * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel - * queue are equal, which means the CP has read all the submitted packets. + * kq_submit_packet: Update the write pointer and doorbell of a kernel queue. * - * @rollback_packet: This routine is called if we failed to build an acquired + * kq_rollback_packet: This routine is called if we failed to build an acquired * packet for some reason. It just overwrites the pending wptr with the current * one * */ -struct kernel_queue_ops { - bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); - void (*uninitialize)(struct kernel_queue *kq); - int (*acquire_packet_buffer)(struct kernel_queue *kq, - size_t packet_size_in_dwords, - unsigned int **buffer_ptr); - void (*submit_packet)(struct kernel_queue *kq); - void (*rollback_packet)(struct kernel_queue *kq); -}; +int kq_acquire_packet_buffer(struct kernel_queue *kq, + size_t packet_size_in_dwords, + unsigned int **buffer_ptr); +void kq_submit_packet(struct kernel_queue *kq); +void kq_rollback_packet(struct kernel_queue *kq); -struct kernel_queue { - struct kernel_queue_ops ops; - struct kernel_queue_ops ops_asic_specific; +struct kernel_queue { /* data */ struct kfd_dev *dev; struct mqd_manager *mqd_mgr; @@ -99,9 +82,4 @@ struct kernel_queue { struct list_head list; }; -void kernel_queue_init_cik(struct kernel_queue_ops *ops); -void kernel_queue_init_vi(struct kernel_queue_ops *ops); -void kernel_queue_init_v9(struct kernel_queue_ops *ops); -void kernel_queue_init_v10(struct kernel_queue_ops *ops); - #endif /* KFD_KERNEL_QUEUE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c deleted file mode 100644 index aed32ab7102e..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c +++ /dev/null @@ -1,348 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "kfd_kernel_queue.h" -#include "kfd_device_queue_manager.h" -#include "kfd_pm4_headers_ai.h" -#include "kfd_pm4_opcodes.h" -#include "gc/gc_10_1_0_sh_mask.h" - -static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_v10(struct kernel_queue *kq); -static void submit_packet_v10(struct kernel_queue *kq); - -void kernel_queue_init_v10(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_v10; - ops->uninitialize = uninitialize_v10; - ops->submit_packet = submit_packet_v10; -} - -static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - int retval; - - retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); - if (retval != 0) - return false; - - kq->eop_gpu_addr = kq->eop_mem->gpu_addr; - kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; - - memset(kq->eop_kernel_addr, 0, PAGE_SIZE); - - return true; -} - -static void uninitialize_v10(struct kernel_queue *kq) -{ - kfd_gtt_sa_free(kq->dev, kq->eop_mem); -} - -static void submit_packet_v10(struct kernel_queue *kq) -{ - *kq->wptr64_kernel = kq->pending_wptr64; - write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, - kq->pending_wptr64); -} - -static int pm_map_process_v10(struct packet_manager *pm, - uint32_t *buffer, struct qcm_process_device *qpd) -{ - struct pm4_mes_map_process *packet; - uint64_t vm_page_table_base_addr = qpd->page_table_base; - - packet = (struct pm4_mes_map_process *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_process)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, - sizeof(struct pm4_mes_map_process)); - packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; - packet->bitfields2.process_quantum = 1; - packet->bitfields2.pasid = qpd->pqm->process->pasid; - packet->bitfields14.gds_size = qpd->gds_size; - packet->bitfields14.num_gws = qpd->num_gws; - packet->bitfields14.num_oac = qpd->num_oac; - packet->bitfields14.sdma_enable = 1; - - packet->bitfields14.num_queues = (qpd->is_debug) ? 
0 : qpd->queue_count; - - packet->sh_mem_config = qpd->sh_mem_config; - packet->sh_mem_bases = qpd->sh_mem_bases; - if (qpd->tba_addr) { - packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) | - upper_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); - packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); - } - - packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); - packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); - - packet->vm_context_page_table_base_addr_lo32 = - lower_32_bits(vm_page_table_base_addr); - packet->vm_context_page_table_base_addr_hi32 = - upper_32_bits(vm_page_table_base_addr); - - return 0; -} - -static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer, - uint64_t ib, size_t ib_size_in_dwords, bool chain) -{ - struct pm4_mes_runlist *packet; - - int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; - - /* Determine the number of processes to map together to HW: - * it can not exceed the number of VMIDs available to the - * scheduler, and it is determined by the smaller of the number - * of processes in the runlist and kfd module parameter - * hws_max_conc_proc. - * Note: the arbitration between the number of VMIDs and - * hws_max_conc_proc has been done in - * kgd2kfd_device_init(). - */ - concurrent_proc_cnt = min(pm->dqm->processes_count, - kfd->max_proc_per_quantum); - - - packet = (struct pm4_mes_runlist *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_runlist)); - packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, - sizeof(struct pm4_mes_runlist)); - - packet->bitfields4.ib_size = ib_size_in_dwords; - packet->bitfields4.chain = chain ? 1 : 0; - packet->bitfields4.offload_polling = 0; - packet->bitfields4.valid = 1; - packet->bitfields4.process_cnt = concurrent_proc_cnt; - packet->ordinal2 = lower_32_bits(ib); - packet->ib_base_hi = upper_32_bits(ib); - - return 0; -} - -static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer, - struct queue *q, bool is_static) -{ - struct pm4_mes_map_queues *packet; - bool use_static = is_static; - - packet = (struct pm4_mes_map_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, - sizeof(struct pm4_mes_map_queues)); - packet->bitfields2.num_queues = 1; - packet->bitfields2.queue_sel = - queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; - - packet->bitfields2.engine_sel = - engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_compute_vi; - - switch (q->properties.type) { - case KFD_QUEUE_TYPE_COMPUTE: - if (use_static) - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_latency_static_queue_vi; - break; - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.queue_type = - queue_type__mes_map_queues__debug_interface_queue_vi; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = q->properties.sdma_engine_id + - engine_sel__mes_map_queues__sdma0_vi; - use_static = false; /* no static queues under SDMA */ - break; - default: - WARN(1, "queue type %d\n", q->properties.type); - return -EINVAL; - } - packet->bitfields3.doorbell_offset = - q->properties.doorbell_off; - - packet->mqd_addr_lo = - lower_32_bits(q->gart_mqd_addr); - - packet->mqd_addr_hi = - upper_32_bits(q->gart_mqd_addr); - - packet->wptr_addr_lo = - 
lower_32_bits((uint64_t)q->properties.write_ptr); - - packet->wptr_addr_hi = - upper_32_bits((uint64_t)q->properties.write_ptr); - - return 0; -} - -static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer, - enum kfd_queue_type type, - enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset, - unsigned int sdma_engine) -{ - struct pm4_mes_unmap_queues *packet; - - packet = (struct pm4_mes_unmap_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); - - packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, - sizeof(struct pm4_mes_unmap_queues)); - switch (type) { - case KFD_QUEUE_TYPE_COMPUTE: - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__compute; - break; - case KFD_QUEUE_TYPE_SDMA: - case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; - break; - default: - WARN(1, "queue type %d\n", type); - break; - } - - if (reset) - packet->bitfields2.action = - action__mes_unmap_queues__reset_queues; - else - packet->bitfields2.action = - action__mes_unmap_queues__preempt_queues; - - switch (filter) { - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; - packet->bitfields2.num_queues = 1; - packet->bitfields3b.doorbell_offset0 = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_BY_PASID: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; - packet->bitfields3a.pasid = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_queues; - break; - case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: - /* in this case, we do not preempt static queues */ - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_non_static_queues; - break; - default: - WARN(1, "filter %d\n", filter); - break; - } - - return 0; - -} - -static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer, - uint64_t fence_address, uint32_t fence_value) -{ - struct pm4_mes_query_status *packet; - - packet = (struct pm4_mes_query_status *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_query_status)); - - - packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, - sizeof(struct pm4_mes_query_status)); - - packet->bitfields2.context_id = 0; - packet->bitfields2.interrupt_sel = - interrupt_sel__mes_query_status__completion_status; - packet->bitfields2.command = - command__mes_query_status__fence_only_after_write_ack; - - packet->addr_hi = upper_32_bits((uint64_t)fence_address); - packet->addr_lo = lower_32_bits((uint64_t)fence_address); - packet->data_hi = upper_32_bits((uint64_t)fence_value); - packet->data_lo = lower_32_bits((uint64_t)fence_value); - - return 0; -} - - -static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer) -{ - struct pm4_mec_release_mem *packet; - - WARN_ON(!buffer); - - packet = (struct pm4_mec_release_mem *)buffer; - memset(buffer, 0, sizeof(struct pm4_mec_release_mem)); - - packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, - sizeof(struct pm4_mec_release_mem)); - - packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe; - packet->bitfields2.tcl1_action_ena = 1; - packet->bitfields2.tc_action_ena = 1; - packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru; - - 
packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low; - packet->bitfields3.int_sel = - int_sel__mec_release_mem__send_interrupt_after_write_confirm; - - packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; - packet->address_hi = upper_32_bits(gpu_addr); - - packet->data_lo = 0; - - return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int); -} - -const struct packet_manager_funcs kfd_v10_pm_funcs = { - .map_process = pm_map_process_v10, - .runlist = pm_runlist_v10, - .set_resources = pm_set_resources_vi, - .map_queues = pm_map_queues_v10, - .unmap_queues = pm_unmap_queues_v10, - .query_status = pm_query_status_v10, - .release_mem = pm_release_mem_v10, - .map_process_size = sizeof(struct pm4_mes_map_process), - .runlist_size = sizeof(struct pm4_mes_runlist), - .set_resources_size = sizeof(struct pm4_mes_set_resources), - .map_queues_size = sizeof(struct pm4_mes_map_queues), - .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), - .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = sizeof(struct pm4_mec_release_mem) -}; - diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 986ff52d5750..f4b7f7e6c40e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -82,7 +82,7 @@ static void kfd_exit(void) kfd_chardev_exit(); } -int kgd2kfd_init() +int kgd2kfd_init(void) { return kfd_init(); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 28876aceb14b..19f0fe547c57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -374,7 +374,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; mqd->free_mqd = free_mqd; @@ -401,7 +400,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, #endif break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; @@ -442,7 +441,7 @@ struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, mqd = mqd_manager_init_cik(type, dev); if (!mqd) return NULL; - if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE)) + if (type == KFD_MQD_TYPE_CP) mqd->update_mqd = update_mqd_hawaii; return mqd; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 9cd3eb2d90bd..7832ec6e480b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -66,38 +66,22 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m->compute_static_thread_mgmt_se3); } +static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) +{ + m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; + m->cp_hqd_queue_priority = q->priority; +} + static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, struct queue_properties *q) { - int retval; - struct kfd_mem_obj *mqd_mem_obj = NULL; + struct kfd_mem_obj *mqd_mem_obj; - /* From V9, for CWSR, the control stack is located on the next page - * boundary after the mqd, we will use the gtt allocation function - * instead of sub-allocation function. 
- */ - if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { - mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); - if (!mqd_mem_obj) - return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, - ALIGN(q->ctl_stack_size, PAGE_SIZE) + - ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE), - &(mqd_mem_obj->gtt_mem), - &(mqd_mem_obj->gpu_addr), - (void *)&(mqd_mem_obj->cpu_ptr), true); - } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd), - &mqd_mem_obj); - } - - if (retval) { - kfree(mqd_mem_obj); + if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd), + &mqd_mem_obj)) return NULL; - } return mqd_mem_obj; - } static void init_mqd(struct mqd_manager *mm, void **mqd, @@ -131,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; - m->cp_hqd_pipe_priority = 1; - m->cp_hqd_queue_priority = 15; - if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; @@ -230,11 +211,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, q); + set_priority(m, q); - q->is_active = (q->queue_size > 0 && - q->queue_address != 0 && - q->queue_percent > 0 && - !q->is_evicted); + q->is_active = QUEUE_IS_ACTIVE(*q); } static int destroy_mqd(struct mqd_manager *mm, void *mqd, @@ -250,14 +229,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, static void free_mqd(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj) { - struct kfd_dev *kfd = mm->dev; - - if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); - kfree(mqd_mem_obj); - } else { - kfd_gtt_sa_free(mm->dev, mqd_mem_obj); - } + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); } static bool is_occupied(struct mqd_manager *mm, void *mqd, @@ -276,18 +248,22 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, { struct v10_compute_mqd *m; - /* Control stack is located one page after MQD. */ - void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); - m = get_mqd(mqd); + /* Control stack is written backwards, while workgroup context data + * is written forwards. Both starts from m->cp_hqd_cntl_stack_size. + * Current position is at m->cp_hqd_cntl_stack_offset and + * m->cp_hqd_wg_state_offset, respectively. + */ *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - m->cp_hqd_cntl_stack_offset; *save_area_used_size = m->cp_hqd_wg_state_offset - m->cp_hqd_cntl_stack_size; - if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) - return -EFAULT; + /* Control stack is not copied to user mode for GFXv10 because + * it's part of the context save area that is already + * accessible to user mode + */ return 0; } @@ -306,18 +282,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } -static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) -{ - struct v10_compute_mqd *m; - - update_mqd(mm, mqd, q); - - /* TODO: what's the point? update_mqd already does this. 
*/ - m = get_mqd(mqd); - m->cp_hqd_vmid = q->vmid; -} - static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -369,11 +333,7 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, m->sdma_queue_id = q->sdma_queue_id; m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT; - - q->is_active = (q->queue_size > 0 && - q->queue_address != 0 && - q->queue_percent > 0 && - !q->is_evicted); + q->is_active = QUEUE_IS_ACTIVE(*q); } /* @@ -421,7 +381,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, if (WARN_ON(type >= KFD_MQD_TYPE_MAX)) return NULL; - mqd = kzalloc(sizeof(*mqd), GFP_NOIO); + mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); if (!mqd) return NULL; @@ -429,7 +389,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: pr_debug("%s@%i\n", __func__, __LINE__); mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; @@ -451,7 +410,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v10_compute_mqd); @@ -461,11 +420,11 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, pr_debug("%s@%i\n", __func__, __LINE__); break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v10_compute_mqd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index d3380c5bdbde..aa9010995eaf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -92,7 +92,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * instead of sub-allocation function. */ if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { - mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); + mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, @@ -302,7 +302,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - m->cp_hqd_cntl_stack_offset; - *save_area_used_size = m->cp_hqd_wg_state_offset; + *save_area_used_size = m->cp_hqd_wg_state_offset - + m->cp_hqd_cntl_stack_size; if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) return -EFAULT; @@ -324,18 +325,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; } -static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) -{ - struct v9_mqd *m; - - update_mqd(mm, mqd, q); - - /* TODO: what's the point? update_mqd already does this. 
*/ - m = get_mqd(mqd); - m->cp_hqd_vmid = q->vmid; -} - static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -443,7 +432,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; mqd->free_mqd = free_mqd; @@ -462,7 +450,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v9_mqd); @@ -471,11 +459,11 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, #endif break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd_hiq; + mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; mqd->mqd_size = sizeof(struct v9_mqd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 7d144f56f421..a5e8ff1e5945 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -312,11 +312,7 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { - struct vi_mqd *m; __update_mqd(mm, mqd, q, MTYPE_UC, 0); - - m = get_mqd(mqd); - m->cp_hqd_vmid = q->vmid; } static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, @@ -425,7 +421,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - case KFD_MQD_TYPE_COMPUTE: mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd; mqd->free_mqd = free_mqd; @@ -453,7 +448,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, #endif break; case KFD_MQD_TYPE_DIQ: - mqd->allocate_mqd = allocate_hiq_mqd; + mqd->allocate_mqd = allocate_mqd; mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd; mqd->load_mqd = load_mqd; @@ -494,7 +489,7 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, mqd = mqd_manager_init_vi(type, dev); if (!mqd) return NULL; - if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE)) + if (type == KFD_MQD_TYPE_CP) mqd->update_mqd = update_mqd_tonga; return mqd; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 2c8624c5b42c..6cabed06ef5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -239,11 +239,12 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: - pm->pmf = &kfd_v9_pm_funcs; - break; case CHIP_NAVI10: - pm->pmf = &kfd_v10_pm_funcs; + case CHIP_NAVI12: + case CHIP_NAVI14: + pm->pmf = &kfd_v9_pm_funcs; break; default: WARN(1, "Unexpected ASIC family %u", @@ -277,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm, size = pm->pmf->set_resources_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size 
/ sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { @@ -288,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm, retval = pm->pmf->set_resources(pm, buffer, res); if (!retval) - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); else - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); out: mutex_unlock(&pm->lock); @@ -315,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t); mutex_lock(&pm->lock); - retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + retval = kq_acquire_packet_buffer(pm->priv_queue, packet_size_dwords, &rl_buffer); if (retval) goto fail_acquire_packet_buffer; @@ -325,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) if (retval) goto fail_create_runlist; - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); mutex_unlock(&pm->lock); return retval; fail_create_runlist: - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); fail_acquire_packet_buffer: mutex_unlock(&pm->lock); fail_create_runlist_ib: @@ -351,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, size = pm->pmf->query_status_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); @@ -361,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); if (!retval) - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); else - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); out: mutex_unlock(&pm->lock); @@ -380,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, size = pm->pmf->unmap_queues_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); @@ -391,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, reset, sdma_engine); if (!retval) - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); else - pm->priv_queue->ops.rollback_packet(pm->priv_queue); + kq_rollback_packet(pm->priv_queue); out: mutex_unlock(&pm->lock); @@ -438,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm) size = pm->pmf->query_status_size; mutex_lock(&pm->lock); - pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t), (unsigned int **)&buffer); if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); @@ -446,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm) goto out; } memset(buffer, 0x55, size); - pm->priv_queue->ops.submit_packet(pm->priv_queue); + kq_submit_packet(pm->priv_queue); pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.", buffer[0], buffer[1], buffer[2], buffer[3], diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 9a4bafb2e175..2de01009f1b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -25,47 +25,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_pm4_headers_ai.h" #include "kfd_pm4_opcodes.h" - -static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_v9(struct kernel_queue *kq); -static void submit_packet_v9(struct kernel_queue *kq); - -void kernel_queue_init_v9(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_v9; - ops->uninitialize = uninitialize_v9; - ops->submit_packet = submit_packet_v9; -} - -static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - int retval; - - retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); - if (retval) - return false; - - kq->eop_gpu_addr = kq->eop_mem->gpu_addr; - kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; - - memset(kq->eop_kernel_addr, 0, PAGE_SIZE); - - return true; -} - -static void uninitialize_v9(struct kernel_queue *kq) -{ - kfd_gtt_sa_free(kq->dev, kq->eop_mem); -} - -static void submit_packet_v9(struct kernel_queue *kq) -{ - *kq->wptr64_kernel = kq->pending_wptr64; - write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, - kq->pending_wptr64); -} +#include "gc/gc_10_1_0_sh_mask.h" static int pm_map_process_v9(struct packet_manager *pm, uint32_t *buffer, struct qcm_process_device *qpd) @@ -90,10 +50,17 @@ static int pm_map_process_v9(struct packet_manager *pm, packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; - packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8); - packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); - packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); + if (qpd->tba_addr) { + packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); + /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is + * not defined, so setting it won't do any harm. 
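A note on the trap-handler programming in this map_process hunk: the TBA (trap base) and TMA (trap memory) addresses are programmed in units of 256 bytes, hence the right shift by 8 before the GPU VA is split across a lo/hi register pair. A minimal sketch of that packing, assuming the kernel's lower_32_bits()/upper_32_bits() helpers and the SQ_SHADER_TBA_HI__TRAP_EN__SHIFT definition pulled in from gc_10_1_0_sh_mask.h:

	/* Sketch only: pack 256-byte-aligned trap handler addresses the way
	 * pm_map_process_v9() does, with the trap-enable bit set in TBA_HI. */
	static void pack_trap_addrs(uint64_t tba_addr, uint64_t tma_addr,
				    struct pm4_mes_map_process *packet)
	{
		packet->sq_shader_tba_lo = lower_32_bits(tba_addr >> 8);
		packet->sq_shader_tba_hi = upper_32_bits(tba_addr >> 8) |
					   1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
		packet->sq_shader_tma_lo = lower_32_bits(tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(tma_addr >> 8);
	}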
+ */ + packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8) + | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT; + + packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); + packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); + } packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); @@ -341,35 +308,6 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, return 0; } - -static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer) -{ - struct pm4_mec_release_mem *packet; - - packet = (struct pm4_mec_release_mem *)buffer; - memset(buffer, 0, sizeof(struct pm4_mec_release_mem)); - - packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, - sizeof(struct pm4_mec_release_mem)); - - packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe; - packet->bitfields2.tcl1_action_ena = 1; - packet->bitfields2.tc_action_ena = 1; - packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru; - - packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low; - packet->bitfields3.int_sel = - int_sel__mec_release_mem__send_interrupt_after_write_confirm; - - packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; - packet->address_hi = upper_32_bits(gpu_addr); - - packet->data_lo = 0; - - return 0; -} - const struct packet_manager_funcs kfd_v9_pm_funcs = { .map_process = pm_map_process_v9, .runlist = pm_runlist_v9, @@ -377,12 +315,12 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, .query_status = pm_query_status_v9, - .release_mem = pm_release_mem_v9, + .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), .runlist_size = sizeof(struct pm4_mes_runlist), .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), .query_status_size = sizeof(struct pm4_mes_query_status), - .release_mem_size = sizeof(struct pm4_mec_release_mem) + .release_mem_size = 0, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 2adaf40027eb..bed4d0ccb6b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -26,47 +26,6 @@ #include "kfd_pm4_headers_vi.h" #include "kfd_pm4_opcodes.h" -static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size); -static void uninitialize_vi(struct kernel_queue *kq); -static void submit_packet_vi(struct kernel_queue *kq); - -void kernel_queue_init_vi(struct kernel_queue_ops *ops) -{ - ops->initialize = initialize_vi; - ops->uninitialize = uninitialize_vi; - ops->submit_packet = submit_packet_vi; -} - -static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, - enum kfd_queue_type type, unsigned int queue_size) -{ - int retval; - - retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); - if (retval != 0) - return false; - - kq->eop_gpu_addr = kq->eop_mem->gpu_addr; - kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; - - memset(kq->eop_kernel_addr, 0, PAGE_SIZE); - - return true; -} - -static void uninitialize_vi(struct kernel_queue *kq) -{ - kfd_gtt_sa_free(kq->dev, kq->eop_mem); -} - -static void submit_packet_vi(struct kernel_queue *kq) -{ - *kq->wptr_kernel = 
kq->pending_wptr; - write_kernel_doorbell(kq->queue->properties.doorbell_ptr, - kq->pending_wptr); -} - unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size) { union PM4_MES_TYPE_3_HEADER header; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index c89326125d71..fc61b5ec068e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -36,6 +36,10 @@ #include <linux/seq_file.h> #include <linux/kref.h> #include <linux/sysfs.h> +#include <linux/device_cgroup.h> +#include <drm/drm_file.h> +#include <drm/drm_drv.h> +#include <drm/drm_device.h> #include <kgd_kfd_interface.h> #include "amd_shared.h" @@ -55,24 +59,21 @@ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these * defines are w.r.t to PAGE_SIZE */ -#define KFD_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT) +#define KFD_MMAP_TYPE_SHIFT 62 #define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT) #define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT) -#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT) +#define KFD_MMAP_GPU_ID_SHIFT 46 #define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \ << KFD_MMAP_GPU_ID_SHIFT) #define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\ & KFD_MMAP_GPU_ID_MASK) -#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \ +#define KFD_MMAP_GET_GPU_ID(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \ >> KFD_MMAP_GPU_ID_SHIFT) -#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT) -#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK) - /* * When working with cp scheduler we should assign the HIQ manually or via * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot @@ -179,10 +180,6 @@ enum cache_policy { cache_policy_noncoherent }; -#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11) -#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \ - (chip) <= CHIP_NAVI10) || \ - (chip) == CHIP_HAWAII) #define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) struct kfd_event_interrupt_class { @@ -230,6 +227,7 @@ struct kfd_dev { const struct kfd_device_info *device_info; struct pci_dev *pdev; + struct drm_device *ddev; unsigned int id; /* topology stub index */ @@ -237,9 +235,10 @@ struct kfd_dev { * KFD. It is aligned for mapping * into user mode */ - size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell - * to HW doorbell, GFX reserved some - * at the start) + size_t doorbell_base_dw_offset; /* Offset from the start of the PCI + * doorbell BAR to the first KFD + * doorbell in dwords. GFX reserves + * the segment before this offset. */ u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells * page used by kernel queue @@ -509,8 +508,7 @@ struct queue { * Please read the kfd_mqd_manager.h description. 
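For illustration, the reworked KFD_MMAP_* defines in the kfd_priv.h hunk above now encode the resource type in bits 63:62 and the GPU ID in bits 61:46 of the full 64-bit mmap offset, instead of applying the shifts to vm_pgoff units. A hedged sketch of composing an offset for user space and decoding it again, using only the macros from this header (the vm_pgoff << PAGE_SHIFT decode step is an assumption based on the shift change):

	/* Sketch: build the doorbell mmap offset handed back to user space. */
	static uint64_t doorbell_mmap_offset(uint32_t gpu_id)
	{
		return KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
	}

	/* ...and recover the GPU ID in the mmap handler, where the full byte
	 * offset is vma->vm_pgoff << PAGE_SHIFT: */
	gpu_id = KFD_MMAP_GET_GPU_ID(vma->vm_pgoff << PAGE_SHIFT);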
*/ enum KFD_MQD_TYPE { - KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */ - KFD_MQD_TYPE_HIQ, /* for hiq */ + KFD_MQD_TYPE_HIQ = 0, /* for hiq */ KFD_MQD_TYPE_CP, /* for cp queues and diq */ KFD_MQD_TYPE_SDMA, /* for sdma queues */ KFD_MQD_TYPE_DIQ, /* for diq */ @@ -687,7 +685,7 @@ struct kfd_process { /* We want to receive a notification when the mm_struct is destroyed */ struct mmu_notifier mmu_notifier; - unsigned int pasid; + uint16_t pasid; unsigned int doorbell_index; /* @@ -817,7 +815,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); u32 read_kernel_doorbell(u32 __iomem *db); void write_kernel_doorbell(void __iomem *db, u32 value); void write_kernel_doorbell64(void __iomem *db, u64 value); -unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, +unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, struct kfd_process *process, unsigned int doorbell_id); phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, @@ -903,7 +901,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_dev *dev, struct file *f, struct queue_properties *properties, - unsigned int *qid); + unsigned int *qid, + uint32_t *p_doorbell_offset_in_process); int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid); int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, struct queue_properties *p); @@ -971,7 +970,6 @@ struct packet_manager_funcs { extern const struct packet_manager_funcs kfd_vi_pm_funcs; extern const struct packet_manager_funcs kfd_v9_pm_funcs; -extern const struct packet_manager_funcs kfd_v10_pm_funcs; int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); void pm_uninit(struct packet_manager *pm); @@ -990,9 +988,6 @@ void pm_release_ib(struct packet_manager *pm); /* Following PM funcs can be shared among VI and AI */ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size); -int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, - struct scheduling_resources *res); - uint64_t kfd_get_number_elems(struct kfd_dev *kfd); @@ -1040,6 +1035,21 @@ bool kfd_is_locked(void); void kfd_inc_compute_active(struct kfd_dev *dev); void kfd_dec_compute_active(struct kfd_dev *dev); +/* Cgroup Support */ +/* Check with device cgroup if @kfd device is accessible */ +static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) +{ +#if defined(CONFIG_CGROUP_DEVICE) + struct drm_device *ddev = kfd->ddev; + + return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, + ddev->render->index, + DEVCG_ACC_WRITE | DEVCG_ACC_READ); +#else + return 0; +#endif +} + /* Debugfs */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 40e3fc0c6942..8276601a122f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) list_for_each_entry_safe(pdd, temp, &p->per_device_data, per_device_list) { - pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", + pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", pdd->dev->id, p->pasid); if (pdd->drm_file) { @@ -560,8 +560,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) continue; - offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id)) - << PAGE_SHIFT; + offset = 
KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); qpd->tba_addr = (int64_t)vm_mmap(filep, 0, KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, MAP_SHARED, offset); @@ -687,6 +686,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, struct kfd_dev *dev) { unsigned int i; + int range_start = dev->shared_resources.non_cp_doorbells_start; + int range_end = dev->shared_resources.non_cp_doorbells_end; if (!KFD_IS_SOC15(dev->device_info->asic_family)) return 0; @@ -698,14 +699,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, return -ENOMEM; /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */ + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", + range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); + for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { - if (i >= dev->shared_resources.non_cp_doorbells_start - && i <= dev->shared_resources.non_cp_doorbells_end) { + if (i >= range_start && i <= range_end) { set_bit(i, qpd->doorbell_bitmap); set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, qpd->doorbell_bitmap); - pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i, - i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); } } @@ -1020,7 +1023,7 @@ static void evict_process_worker(struct work_struct *work) */ flush_delayed_work(&p->restore_work); - pr_debug("Started evicting pasid %d\n", p->pasid); + pr_debug("Started evicting pasid 0x%x\n", p->pasid); ret = kfd_process_evict_queues(p); if (!ret) { dma_fence_signal(p->ef); @@ -1029,9 +1032,9 @@ static void evict_process_worker(struct work_struct *work) queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); - pr_debug("Finished evicting pasid %d\n", p->pasid); + pr_debug("Finished evicting pasid 0x%x\n", p->pasid); } else - pr_err("Failed to evict queues of pasid %d\n", p->pasid); + pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); } static void restore_process_worker(struct work_struct *work) @@ -1046,7 +1049,7 @@ static void restore_process_worker(struct work_struct *work) * lifetime of this thread, kfd_process p will be valid */ p = container_of(dwork, struct kfd_process, restore_work); - pr_debug("Started restoring pasid %d\n", p->pasid); + pr_debug("Started restoring pasid 0x%x\n", p->pasid); /* Setting last_restore_timestamp before successful restoration. 
* Otherwise this would have to be set by KGD (restore_process_bos) @@ -1062,7 +1065,7 @@ static void restore_process_worker(struct work_struct *work) ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, &p->ef); if (ret) { - pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", + pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", p->pasid, PROCESS_BACK_OFF_TIME_MS); ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); @@ -1072,9 +1075,9 @@ static void restore_process_worker(struct work_struct *work) ret = kfd_process_restore_queues(p); if (!ret) - pr_debug("Finished restoring pasid %d\n", p->pasid); + pr_debug("Finished restoring pasid 0x%x\n", p->pasid); else - pr_err("Failed to restore queues of pasid %d\n", p->pasid); + pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); } void kfd_suspend_all_processes(void) @@ -1088,7 +1091,7 @@ void kfd_suspend_all_processes(void) cancel_delayed_work_sync(&p->restore_work); if (kfd_process_evict_queues(p)) - pr_err("Failed to suspend process %d\n", p->pasid); + pr_err("Failed to suspend process 0x%x\n", p->pasid); dma_fence_signal(p->ef); dma_fence_put(p->ef); p->ef = NULL; @@ -1171,7 +1174,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) int idx = srcu_read_lock(&kfd_processes_srcu); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - seq_printf(m, "Process %d PASID %d:\n", + seq_printf(m, "Process %d PASID 0x%x:\n", p->lead_thread->tgid, p->pasid); mutex_lock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 7e6c3ee82f5b..1152490bbf53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, pr_debug("The new slot id %lu\n", found); if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { - pr_info("Cannot open more queues for process with pasid %d\n", + pr_info("Cannot open more queues for process with pasid 0x%x\n", pqm->process->pasid); return -ENOMEM; } @@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm) pqm->queue_slot_bitmap = NULL; } -static int create_cp_queue(struct process_queue_manager *pqm, +static int init_user_queue(struct process_queue_manager *pqm, struct kfd_dev *dev, struct queue **q, struct queue_properties *q_properties, struct file *f, unsigned int qid) @@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, struct kfd_dev *dev, struct file *f, struct queue_properties *properties, - unsigned int *qid) + unsigned int *qid, + uint32_t *p_doorbell_offset_in_process) { int retval; struct kfd_process_device *pdd; @@ -250,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -271,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } - retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + retval = init_user_queue(pqm, dev, &q, properties, f, *qid); if (retval != 0) goto err_create_queue; pqn->q = q; @@ -298,17 +299,20 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("Pasid %d DQM create queue %d failed. 
ret %d\n", + pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n", pqm->process->pasid, type, retval); goto err_create_queue; } - if (q) + if (q && p_doorbell_offset_in_process) /* Return the doorbell offset within the doorbell page * to the caller so it can be passed up to user mode * (in bytes). + * There are always 1024 doorbells per process, so in case + * of 8-byte doorbells, there are two doorbell pages per + * process. */ - properties->doorbell_off = + *p_doorbell_offset_in_process = (q->properties.doorbell_off * sizeof(uint32_t)) & (kfd_doorbell_process_slice(dev) - 1); @@ -377,7 +381,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { - pr_err("Pasid %d destroy queue %d failed, ret %d\n", + pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n", pqm->process->pasid, pqn->q->properties.queue_id, retval); if (retval != -ETIME) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 7551761f2aa9..69bd0628fdc6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; iolink = container_of(attr, struct kfd_iolink_properties, attr); + if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type); sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj); sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min); @@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; mem = container_of(attr, struct kfd_mem_properties, attr); + if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type); sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes); sysfs_show_32bit_prop(buffer, "flags", mem->flags); @@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; cache = container_of(attr, struct kfd_cache_properties, attr); + if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "processor_id_low", cache->processor_id_low); sysfs_show_32bit_prop(buffer, "level", cache->cache_level); @@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (strcmp(attr->name, "gpu_id") == 0) { dev = container_of(attr, struct kfd_topology_device, attr_gpuid); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; return sysfs_show_32bit_val(buffer, dev->gpu_id); } @@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev = container_of(attr, struct kfd_topology_device, attr_name); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; return sysfs_show_str_val(buffer, dev->node_props.name); } dev = container_of(attr, struct kfd_topology_device, attr_props); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "cpu_cores_count", dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, "simd_count", @@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) { struct kfd_topology_device *dev; struct kfd_topology_device *out_dev = NULL; + struct 
kfd_mem_properties *mem; + struct kfd_cache_properties *cache; + struct kfd_iolink_properties *iolink; down_write(&topology_lock); list_for_each_entry(dev, &topology_device_list, list) { @@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; + + list_for_each_entry(mem, &dev->mem_props, list) + mem->gpu = dev->gpu; + list_for_each_entry(cache, &dev->cache_props, list) + cache->gpu = dev->gpu; + list_for_each_entry(iolink, &dev->io_link_props, list) + iolink->gpu = dev->gpu; break; } } @@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index d4718d58d0f2..15843e0fc756 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -102,6 +102,7 @@ struct kfd_mem_properties { uint32_t flags; uint32_t width; uint32_t mem_clk_max; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; @@ -123,6 +124,7 @@ struct kfd_cache_properties { uint32_t cache_latency; uint32_t cache_type; uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE]; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; @@ -141,6 +143,7 @@ struct kfd_iolink_properties { uint32_t max_bandwidth; uint32_t rec_transfer_size; uint32_t flags; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 71991a28a775..096db863c345 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT menu "Display Engine Configuration" depends on DRM && DRM_AMDGPU @@ -6,43 +6,24 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the new display engine support for AMDGPU. This adds required support for Vega and Raven ASICs. 
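The hunk below collapses the per-family display options (DCN1_0, DCN2_0, DCN2_1 and the separate DSC switch) into a single DRM_AMD_DC_DCN symbol, so Raven, Navi and Renoir code paths share one guard. As a sketch of the resulting usage pattern (mirroring the guards used later in this patch around the DCN IRQ registration):

	#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* DCN-only path: Raven/Navi/Renoir, DSC, DCN IRQ handlers */
		if (dcn10_register_irq_handlers(dm->adev))
			DRM_ERROR("DM: Failed to initialize IRQ\n");
	#endif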
-config DRM_AMD_DC_DCN1_0 +config DRM_AMD_DC_DCN def_bool n help - RV family support for display engine + Raven, Navi and Renoir family support for display engine -config DRM_AMD_DC_DCN2_0 - bool "DCN 2.0 family" - default y - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN1_0 - help - Choose this option if you want to have - Navi support for display engine - -config DRM_AMD_DC_DCN2_1 - bool "DCN 2.1 family" - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN2_0 - help - Choose this option if you want to have - Renoir support for display engine - -config DRM_AMD_DC_DSC_SUPPORT - bool "DSC support" - default y - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN1_0 - depends on DRM_AMD_DC_DCN2_0 +config DRM_AMD_DC_HDCP + bool "Enable HDCP support in DC" + depends on DRM_AMD_DC help - Choose this option if you want to have - Dynamic Stream Compression support + Choose this option + if you want to support + HDCP authentication config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 496cee000f10..2633de77de5e 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,11 +34,20 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dmub/inc + +ifdef CONFIG_DRM_AMD_DC_HDCP +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp +endif #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 -DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power +DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power dmub/src + +ifdef CONFIG_DRM_AMD_DC_HDCP +DAL_LIBS += modules/hdcp +endif AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 94911871eb9b..9a3b7bf8ab0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o endif +ifdef CONFIG_DRM_AMD_DC_HDCP +AMDGPUDM += amdgpu_dm_hdcp.o +endif + ifneq ($(CONFIG_DEBUG_FS),) AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6bd03470a28d..f2db400a3920 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -30,6 +30,10 @@ #include "dc.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" +#include "dmub/inc/dmub_srv.h" +#include "dc/inc/hw/dmcu.h" +#include "dc/inc/hw/abm.h" +#include "dc/dc_dmub_srv.h" #include "vid.h" #include "amdgpu.h" @@ -37,6 +41,10 @@ #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#include <drm/drm_hdcp.h> +#endif #include "amdgpu_pm.h" #include "amd_shared.h" @@ -67,8 +75,9 @@ #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> +#include <drm/drm_hdcp.h> -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if 
defined(CONFIG_DRM_AMD_DC_DCN) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" @@ -83,6 +92,9 @@ #include "modules/power/power_helpers.h" #include "modules/inc/mod_info_packet.h" +#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); + #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); @@ -143,6 +155,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state); +static void amdgpu_dm_set_psr_caps(struct dc_link *link); +static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); + + /* * dm_vblank_get_counter * * @@ -263,6 +281,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +/** + * dm_pflip_high_irq() - Handle pageflip interrupt + * @interrupt_params: ignored + * + * Handles the pageflip interrupt by notifying all interested parties + * that the pageflip has been completed. + */ static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; @@ -407,6 +432,13 @@ static void dm_vupdate_high_irq(void *interrupt_params) } } +/** + * dm_crtc_high_irq() - Handles CRTC interrupt + * @interrupt_params: ignored + * + * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK + * event handler. + */ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; @@ -454,6 +486,70 @@ static void dm_crtc_high_irq(void *interrupt_params) } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +/** + * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs + * @interrupt_params: interrupt parameters + * + * Notify DRM's vblank event handler at VSTARTUP + * + * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which: + * * We are close enough to VUPDATE - the point of no return for hw + * * We are in the fixed portion of variable front porch when vrr is enabled + * * We are before VUPDATE, where double-buffered vrr registers are swapped + * + * It is therefore the correct place to signal vblank, send user flip events, + * and update VRR.
+ */ +static void dm_dcn_crtc_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + struct dm_crtc_state *acrtc_state; + unsigned long flags; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); + + if (!acrtc) + return; + + acrtc_state = to_dm_crtc_state(acrtc->base.state); + + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); + + amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + drm_crtc_handle_vblank(&acrtc->base); + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + + if (acrtc_state->vrr_params.supported && + acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + } + + if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) { + if (acrtc->event) { + drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); + acrtc->event = NULL; + drm_crtc_vblank_put(&acrtc->base); + } + acrtc->pflip_status = AMDGPU_FLIP_NONE; + } + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} +#endif + static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -643,14 +739,160 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) } } +static int dm_dmub_hw_init(struct amdgpu_device *adev) +{ + const unsigned int psp_header_bytes = 0x100; + const unsigned int psp_footer_bytes = 0x100; + const struct dmcub_firmware_header_v1_0 *hdr; + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + const struct firmware *dmub_fw = adev->dm.dmub_fw; + struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; + struct abm *abm = adev->dm.dc->res_pool->abm; + struct dmub_srv_region_params region_params; + struct dmub_srv_region_info region_info; + struct dmub_srv_fb_params fb_params; + struct dmub_srv_fb_info fb_info; + struct dmub_srv_hw_params hw_params; + enum dmub_status status; + const unsigned char *fw_inst_const, *fw_bss_data; + uint32_t i; + int r; + bool has_hw_support; + + if (!dmub_srv) + /* DMUB isn't supported on the ASIC. */ + return 0; + + if (!dmub_fw) { + /* Firmware required for DMUB support. */ + DRM_ERROR("No firmware provided for DMUB.\n"); + return -EINVAL; + } + + status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error checking HW support for DMUB: %d\n", status); + return -EINVAL; + } + + if (!has_hw_support) { + DRM_INFO("DMUB unsupported on ASIC\n"); + return 0; + } + + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; + + /* Calculate the size of all the regions for the DMUB service. */ + memset(®ion_params, 0, sizeof(region_params)); + + region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - + psp_header_bytes - psp_footer_bytes; + region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size; + + status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, + ®ion_info); + + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB region info: %d\n", status); + return -EINVAL; + } + + /* + * Allocate a framebuffer based on the total size of all the regions. + * TODO: Move this into GART. 
+ */ + r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, + &adev->dm.dmub_bo_gpu_addr, + &adev->dm.dmub_bo_cpu_addr); + if (r) + return r; + + /* Rebase the regions on the framebuffer address. */ + memset(&fb_params, 0, sizeof(fb_params)); + fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; + fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; + fb_params.region_info = ®ion_info; + + status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB FB info: %d\n", status); + return -EINVAL; + } + + fw_inst_const = dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + psp_header_bytes; + + fw_bss_data = dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + le32_to_cpu(hdr->inst_const_bytes); + + /* Copy firmware and bios info into FB memory. */ + memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, + region_params.inst_const_size); + memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, + region_params.bss_data_size); + memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr, + adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size); + + /* Initialize hardware. */ + memset(&hw_params, 0, sizeof(hw_params)); + hw_params.fb_base = adev->gmc.fb_start; + hw_params.fb_offset = adev->gmc.aper_base; + + if (dmcu) + hw_params.psp_version = dmcu->psp_version; + + for (i = 0; i < fb_info.num_fb; ++i) + hw_params.fb[i] = &fb_info.fb[i]; + + status = dmub_srv_hw_init(dmub_srv, &hw_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error initializing DMUB HW: %d\n", status); + return -EINVAL; + } + + /* Wait for firmware load to finish. */ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + + /* Init DMCU and ABM if available. 
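Stepping back, dm_dmub_hw_init() above follows a fixed bring-up order. A condensed, hedged sketch of its happy path using the dmub_srv entry points from this patch (firmware/VBIOS window copies and most error handling elided; the 100000 timeout value is taken from the code above):

	/* Sketch of the DMUB init order, not a drop-in replacement. */
	static int dmub_hw_init_sketch(struct dmub_srv *dmub)
	{
		struct dmub_srv_region_params rp = {0};
		struct dmub_srv_region_info ri = {0};
		struct dmub_srv_fb_params fp = {0};
		struct dmub_srv_fb_info fi = {0};
		struct dmub_srv_hw_params hp = {0};
		bool supported = false;

		if (dmub_srv_has_hw_support(dmub, &supported) != DMUB_STATUS_OK ||
		    !supported)
			return 0;			/* no DMUB on this ASIC */

		/* 1. size the firmware windows */
		dmub_srv_calc_region_info(dmub, &rp, &ri);
		/* 2. allocate ri.fb_size of VRAM, then rebase the windows on it */
		fp.region_info = &ri;
		dmub_srv_calc_fb_info(dmub, &fp, &fi);
		/* 3. copy inst_const, bss_data and VBIOS into their windows */
		/* 4. program the hardware and start the DMCUB */
		if (dmub_srv_hw_init(dmub, &hp) != DMUB_STATUS_OK)
			return -EINVAL;
		/* 5. wait for the firmware to finish auto-loading */
		return dmub_srv_wait_for_auto_load(dmub, 100000) == DMUB_STATUS_OK ?
			0 : -ETIMEDOUT;
	}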
*/ + if (dmcu && abm) { + dmcu->funcs->dmcu_init(dmcu); + abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); + } + + adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); + if (!adev->dm.dc->ctx->dmub_srv) { + DRM_ERROR("Couldn't allocate DC DMUB server!\n"); + return -ENOMEM; + } + + DRM_INFO("DMUB hardware initialized: version=0x%08X\n", + adev->dm.dmcub_fw_version); + + return 0; +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct dc_callback_init init_params; +#endif + int r; + adev->dm.ddev = adev->ddev; adev->dm.adev = adev; /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&init_params, 0, sizeof(init_params)); +#endif mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); @@ -688,7 +930,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) */ if (adev->flags & AMD_IS_APU && adev->asic_type >= CHIP_CARRIZO && - adev->asic_type <= CHIP_RAVEN) + adev->asic_type < CHIP_RAVEN) init_data.flags.gpu_vm_support = true; if (amdgpu_dc_feature_mask & DC_FBC_MASK) @@ -697,11 +939,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) init_data.flags.multi_mon_pp_mclk_switch = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) + init_data.flags.disable_fractional_pwm = true; + init_data.flags.power_down_display_on_boot = true; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 init_data.soc_bounding_box = adev->dm.soc_bounding_box; -#endif /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -713,6 +956,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + dc_hardware_init(adev->dm.dc); + + r = dm_dmub_hw_init(adev); + if (r) { + DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + goto error; + } + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -723,6 +974,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) { + adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); + + if (!adev->dm.hdcp_workqueue) + DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + + dc_init_callbacks(adev->dm.dc, &init_params); + } +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -764,6 +1027,25 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->dm.hdcp_workqueue) { + hdcp_destroy(adev->dm.hdcp_workqueue); + adev->dm.hdcp_workqueue = NULL; + } + + if (adev->dm.dc) + dc_deinit_callbacks(adev->dm.dc); +#endif + if (adev->dm.dc->ctx->dmub_srv) { + dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); + adev->dm.dc->ctx->dmub_srv = NULL; + } + + if (adev->dm.dmub_bo) + amdgpu_bo_free_kernel(&adev->dm.dmub_bo, + &adev->dm.dmub_bo_gpu_addr, + &adev->dm.dmub_bo_cpu_addr); + /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); @@ -874,9 +1156,99 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; } +static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) +{ + struct amdgpu_device *adev = ctx; + + return 
dm_read_reg(adev->dm.dc->ctx, address); +} + +static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, + uint32_t value) +{ + struct amdgpu_device *adev = ctx; + + return dm_write_reg(adev->dm.dc->ctx, address, value); +} + +static int dm_dmub_sw_init(struct amdgpu_device *adev) +{ + struct dmub_srv_create_params create_params; + const struct dmcub_firmware_header_v1_0 *hdr; + const char *fw_name_dmub; + enum dmub_asic dmub_asic; + enum dmub_status status; + int r; + + switch (adev->asic_type) { + case CHIP_RENOIR: + dmub_asic = DMUB_ASIC_DCN21; + fw_name_dmub = FIRMWARE_RENOIR_DMUB; + break; + + default: + /* ASIC doesn't support DMUB. */ + return 0; + } + + adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); + if (!adev->dm.dmub_srv) { + DRM_ERROR("Failed to allocate DMUB service!\n"); + return -ENOMEM; + } + + memset(&create_params, 0, sizeof(create_params)); + create_params.user_ctx = adev; + create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; + create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; + create_params.asic = dmub_asic; + + status = dmub_srv_create(adev->dm.dmub_srv, &create_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error creating DMUB service: %d\n", status); + return -EINVAL; + } + + r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); + if (r) { + DRM_ERROR("DMUB firmware loading failed: %d\n", r); + return 0; + } + + r = amdgpu_ucode_validate(adev->dm.dmub_fw); + if (r) { + DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); + return 0; + } + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + DRM_WARN("Only PSP firmware loading is supported for DMUB\n"); + return 0; + } + + hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = + AMDGPU_UCODE_ID_DMCUB; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); + + adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); + + DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", + adev->dm.dmcub_fw_version); + + return 0; +} + static int dm_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = dm_dmub_sw_init(adev); + if (r) + return r; return load_dmcu_fw(adev); } @@ -885,6 +1257,16 @@ static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->dm.dmub_srv) { + dmub_srv_destroy(adev->dm.dmub_srv); + adev->dm.dmub_srv = NULL; + } + + if (adev->dm.dmub_fw) { + release_firmware(adev->dm.dmub_fw); + adev->dm.dmub_fw = NULL; + } + if(adev->dm.fw_dmcu) { release_firmware(adev->dm.fw_dmcu); adev->dm.fw_dmcu = NULL; @@ -897,27 +1279,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; int ret = 0; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", - aconnector, aconnector->base.base.id); + aconnector, + aconnector->base.base.id); ret = 
drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to start MST\n"); - ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; - return ret; - } + aconnector->dc_link->type = + dc_connection_single; + break; } + } } + drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } @@ -940,6 +1324,11 @@ static int dm_late_init(void *handle) params.backlight_lut_array_size = 16; params.backlight_lut_array = linear_lut; + /* Min backlight level after ABM reduction, Don't allow below 1% + * 0xFFFF x 0.01 = 0x28F + */ + params.min_abm_backlight = 0x28F; + /* todo will enable for navi10 */ if (adev->asic_type <= CHIP_RAVEN) { ret = dmcu_load_iram(dmcu, params); @@ -955,14 +1344,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; int ret; bool need_hotplug = false; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_port) @@ -973,15 +1361,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr); + ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { drm_dp_mst_topology_mgr_set_mst(mgr, false); need_hotplug = true; } } } - - drm_modeset_unlock(&dev->mode_config.connection_mutex); + drm_connector_list_iter_end(&iter); if (need_hotplug) drm_kms_helper_hotplug_event(dev); @@ -989,7 +1376,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -1019,7 +1406,7 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. 
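Several hunks in this file (detect_mst_link_for_all_connectors above, s3_handle_mst and dm_resume below) replace open-coded walks of mode_config.connector_list under connection_mutex with the DRM connector-list iterator. The pattern, sketched with the stock drm_connector_list_iter helpers from <drm/drm_connector.h>:

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* per-connector work; the iterator holds its own reference,
		 * so connectors may be hotplugged while the list is walked */
	}
	drm_connector_list_iter_end(&iter);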
This involves cleaning up the DRM device, DC, and any modules that @@ -1163,6 +1550,7 @@ static int dm_resume(void *handle) struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state; @@ -1185,17 +1573,18 @@ static int dm_resume(void *handle) /* program HPD filter */ dc_resume(dm->dc); - /* On resume we need to rewrite the MSTM control bits to enamble MST*/ - s3_handle_mst(ddev, false); - /* * early enable HPD Rx IRQ, should be done before set mode as short * pulse interrupts are used for MST */ amdgpu_dm_irq_resume_early(adev); + /* On resume we need to rewrite the MSTM control bits to enable MST*/ + s3_handle_mst(ddev, false); + /* Do detection*/ - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); /* @@ -1223,6 +1612,7 @@ static int dm_resume(void *handle) amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); } + drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) @@ -1438,6 +1828,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; +#ifdef CONFIG_DRM_AMD_DC_HDCP + /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ + if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +#endif } mutex_unlock(&dev->mode_config.mutex); @@ -1452,6 +1847,9 @@ static void handle_hpd_irq(void *param) struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct amdgpu_device *adev = dev->dev_private; +#endif /* * In case of failure or MST no need to update connector status or notify the OS @@ -1459,6 +1857,10 @@ static void handle_hpd_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); +#endif if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1577,6 +1979,12 @@ static void handle_hpd_rx_irq(void *param) struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + union hpd_irq_data hpd_irq_data; + struct amdgpu_device *adev = dev->dev_private; + + memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); +#endif /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio @@ -1586,7 +1994,12 @@ static void handle_hpd_rx_irq(void *param) if (dc_link->type != dc_connection_mst_branch) mutex_lock(&aconnector->hpd_lock); + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) && +#else if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && +#endif !is_mst_root_connector) { /* Downstream Port status changed. 
*/ if (!dc_link_detect_sink(dc_link, &new_connection_type)) @@ -1621,6 +2034,10 @@ static void handle_hpd_rx_irq(void *param) drm_kms_helper_hotplug_event(dev); } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); +#endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) dm_handle_hpd_rx_irq(aconnector); @@ -1775,7 +2192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) return 0; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* Register IRQ sources and initialize IRQ callbacks */ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) { @@ -1821,35 +2238,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) c_irq_params->irq_src = int_params.irq_source; amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_crtc_high_irq, c_irq_params); - } - - /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to - * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx - * to trigger at end of each vblank, regardless of state of the lock, - * matching DCE behaviour. - */ - for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; - i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; - i++) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); - - if (r) { - DRM_ERROR("Failed to add vupdate irq id!\n"); - return r; - } - - int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; - int_params.irq_source = - dc_interrupt_to_irq_source(dc, i, 0); - - c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; - - c_irq_params->adev = adev; - c_irq_params->irq_src = int_params.irq_source; - - amdgpu_dm_irq_register_interrupt(adev, &int_params, - dm_vupdate_high_irq, c_irq_params); + dm_dcn_crtc_high_irq, c_irq_params); } /* Use GRPH_PFLIP interrupt */ @@ -2334,6 +2723,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); + if (amdgpu_dc_feature_mask & DC_PSR_MASK) + amdgpu_dm_set_psr_caps(link); } @@ -2362,16 +2753,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI12: case CHIP_NAVI10: case CHIP_NAVI14: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: -#endif if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -2517,14 +2904,13 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case CHIP_RAVEN: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: case CHIP_NAVI12: adev->mode_info.num_crtc = 6; @@ -2536,14 +2922,11 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case CHIP_RENOIR: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; break; -#endif 
default: DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; @@ -2836,14 +3219,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI14 || adev->asic_type == CHIP_NAVI12 || -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) adev->asic_type == CHIP_RENOIR || -#endif adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ tiling_info->gfx9.num_pipes = @@ -3161,12 +3540,26 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, - const struct drm_connector_state *state) + const struct drm_connector_state *state, + bool is_y420) { - uint8_t bpc = (uint8_t)connector->display_info.bpc; + uint8_t bpc; - /* Assume 8 bpc by default if no bpc is specified. */ - bpc = bpc ? bpc : 8; + if (is_y420) { + bpc = 8; + + /* Cap display bpc based on HDMI 2.0 HF-VSDB */ + if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) + bpc = 16; + else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) + bpc = 12; + else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) + bpc = 10; + } else { + bpc = (uint8_t)connector->display_info.bpc; + /* Assume 8 bpc by default if no bpc is specified. */ + bpc = bpc ? bpc : 8; + } if (!state) state = connector->state; @@ -3311,8 +3704,12 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct hdmi_vendor_infoframe hv_frame; + struct hdmi_avi_infoframe avi_frame; - memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + memset(&hv_frame, 0, sizeof(hv_frame)); + memset(&avi_frame, 0, sizeof(avi_frame)); timing_out->h_border_left = 0; timing_out->h_border_right = 0; @@ -3322,6 +3719,9 @@ static void fill_stream_properties_from_drm_display_mode( if (drm_mode_is_420_only(info, mode_in) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if (drm_mode_is_420_also(info, mode_in) + && aconnector->force_yuv420_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -3330,7 +3730,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; timing_out->display_color_depth = convert_color_depth_from_display_info( - connector, connector_state); + connector, connector_state, + (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)); timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; @@ -3346,6 +3747,13 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; } + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + timing_out->vic = avi_frame.video_code; + drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + timing_out->hdmi_vic = hv_frame.vic; + } + 
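The 4:2:0 colour-depth capping added to convert_color_depth_from_display_info() above maps the sink's HDMI 2.0 HF-VSDB deep-colour flags to a maximum bpc. As a standalone sketch (DRM_EDID_YCBCR420_DC_* are the existing DRM EDID flag definitions; the helper name is illustrative):

	/* Sketch: highest bpc a sink advertises for YCbCr 4:2:0 output. */
	static uint8_t y420_max_bpc(const struct drm_display_info *info)
	{
		if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			return 16;
		if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			return 12;
		if (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			return 10;
		return 8;	/* 4:2:0 always supports at least 8 bpc */
	}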
timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; timing_out->h_sync_width = @@ -3535,10 +3943,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_dec_dpcd_caps dsc_caps; - uint32_t link_bandwidth_kbps; #endif + uint32_t link_bandwidth_kbps; struct dc_sink *sink = NULL; if (aconnector == NULL) { @@ -3566,6 +3974,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream->dm_stream_context = aconnector; + stream->timing.flags.LTE_340MCSC_SCRAMBLE = + drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { @@ -3610,25 +4021,28 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_stream_properties_from_drm_display_mode(stream, &mode, &aconnector->base, con_state, old_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT stream->timing.flags.DSC = 0; if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { +#if defined(CONFIG_DRM_AMD_DC_DCN) dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, &dsc_caps); +#endif link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); +#if defined(CONFIG_DRM_AMD_DC_DCN) if (dsc_caps.is_dsc_supported) - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], &dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) stream->timing.flags.DSC = 1; - } #endif + } update_stream_scaling_settings(&mode, dm_state, stream); @@ -3639,6 +4053,20 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false); + if (stream->link->psr_feature_enabled) { + struct dc *core_dc = stream->link->ctx->dc; + + if (dc_is_dmcu_initialized(core_dc)) { + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + stream->psr_version = dmcu->dmcu_version.psr_version; + mod_build_vsc_infopacket(stream, + &stream->vsc_infopacket, + &stream->use_vsc_sdp_for_colorimetry); + } + } finish: dc_sink_release(sink); @@ -3727,6 +4155,10 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) struct amdgpu_device *adev = crtc->dev->dev_private; int rc; + /* Do not set vupdate for DCN hardware */ + if (adev->family > AMDGPU_FAMILY_AI) + return 0; + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 
0 : -EBUSY; @@ -3970,7 +4402,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_hborder = 0; state->underscan_vborder = 0; state->base.max_requested_bpc = 8; - + state->vcpi_slots = 0; + state->pbn = 0; if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) state->abm_level = amdgpu_dm_abm_level; @@ -3998,7 +4431,8 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) new_state->underscan_enable = state->underscan_enable; new_state->underscan_hborder = state->underscan_hborder; new_state->underscan_vborder = state->underscan_vborder; - + new_state->vcpi_slots = state->vcpi_slots; + new_state->pbn = state->pbn; return &new_state->base; } @@ -4114,8 +4548,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec result = MODE_OK; else DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", - mode->vdisplay, mode->hdisplay, + mode->vdisplay, mode->clock, dc_result); @@ -4395,10 +4829,68 @@ static void dm_encoder_helper_disable(struct drm_encoder *encoder) } +static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) +{ + switch (display_color_depth) { + case COLOR_DEPTH_666: + return 6; + case COLOR_DEPTH_888: + return 8; + case COLOR_DEPTH_101010: + return 10; + case COLOR_DEPTH_121212: + return 12; + case COLOR_DEPTH_141414: + return 14; + case COLOR_DEPTH_161616: + return 16; + default: + break; + } + return 0; +} + static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector = conn_state->connector; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + enum dc_color_depth color_depth; + int clock, bpp = 0; + bool is_y420 = false; + + if (!aconnector->port || !aconnector->dc_sink) + return 0; + + mst_port = aconnector->port; + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!crtc_state->connectors_changed && !crtc_state->mode_changed) + return 0; + + if (!state->duplicated) { + is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && + aconnector->force_yuv420_output; + color_depth = convert_color_depth_from_display_info(connector, conn_state, + is_y420); + bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; + clock = adjusted_mode->clock; + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp); + } + dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, + mst_mgr, + mst_port, + dm_new_connector_state->pbn); + if (dm_new_connector_state->vcpi_slots < 0) { + DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); + return dm_new_connector_state->vcpi_slots; + } return 0; } @@ -5088,6 +5580,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_vrr_capable_property( &aconnector->base); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + drm_connector_attach_content_protection_property(&aconnector->base, true); +#endif } } @@ -5331,6 +5827,48 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, return false; } +#ifdef CONFIG_DRM_AMD_DC_HDCP +static 
bool is_content_protection_different(struct drm_connector_state *state, + const struct drm_connector_state *old_state, + const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (old_state->hdcp_content_type != state->hdcp_content_type && + state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + return true; + } + + /* CP is being re-enabled; ignore this */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + return false; + } + + /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + /* Check that something is actually connected/enabled; otherwise we would start HDCP + * with nothing to protect (hot-plug, headless S3, DPMS) + */ + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && + aconnector->dc_sink != NULL) + return true; + + if (old_state->content_protection == state->content_protection) + return false; + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + return true; + + return false; +} + +#endif static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -5672,6 +6210,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; + bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -5717,6 +6256,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane = dm_new_plane_state->dc_state; + if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) + swizzle = false; + bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; @@ -5871,6 +6413,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* Update the planes if changed or disable if we don't have any.
*/ if ((planes_count || acrtc_state->active_planes == 0) && acrtc_state->stream) { + bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; @@ -5906,14 +6449,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &acrtc_state->vrr_params.adjust); spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + dc_commit_updates_for_stream(dm->dc, bundle->surface_updates, planes_count, acrtc_state->stream, &bundle->stream_update, dc_state); + + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->psr_version && + !acrtc_state->stream->link->psr_feature_enabled) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_feature_enabled && + !acrtc_state->stream->link->psr_allow_active && + swizzle) { + amdgpu_dm_psr_enable(acrtc_state->stream); + } + mutex_unlock(&dm->dc_lock); } @@ -6222,10 +6780,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) crtc->hwmode = new_crtc_state->mode; } else if (modereset_required(new_crtc_state)) { DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); - /* i.e. reset mode */ - if (dm_old_crtc_state->stream) + if (dm_old_crtc_state->stream) { + if (dm_old_crtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(dm_old_crtc_state->stream); + remove_stream(adev, acrtc, dm_old_crtc_state->stream); + } } } /* for_each_crtc_in_state() */ @@ -6255,6 +6816,34 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + new_crtc_state = NULL; + + if (acrtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); + new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + continue; + } + + if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + hdcp_update_display( + adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, + new_con_state->hdcp_content_type, + new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? 
true + : false); + } +#endif /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -6294,9 +6883,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!scaling_changed && !abm_changed && !hdr_changed) continue; + stream_update.stream = dm_new_crtc_state->stream; if (scaling_changed) { update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, - dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); + dm_new_con_state, dm_new_crtc_state->stream); stream_update.src = dm_new_crtc_state->stream->src; stream_update.dst = dm_new_crtc_state->stream->dst; @@ -7041,7 +7631,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, int i, j, num_plane, ret = 0; struct drm_plane_state *old_plane_state, *new_plane_state; struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; - struct drm_crtc *new_plane_crtc, *old_plane_crtc; + struct drm_crtc *new_plane_crtc; struct drm_plane *plane; struct drm_crtc *crtc; @@ -7087,7 +7677,6 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, uint64_t tiling_flags; new_plane_crtc = new_plane_state->crtc; - old_plane_crtc = old_plane_state->crtc; new_dm_plane_state = to_dm_plane_state(new_plane_state); old_dm_plane_state = to_dm_plane_state(old_plane_state); @@ -7165,7 +7754,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - + stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. @@ -7343,6 +7932,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + /* Perform validation of MST topology in the state*/ + ret = drm_dp_mst_atomic_check(state); + if (ret) + goto fail; + if (state->legacy_cursor_update) { /* * This is a fast cursor update coming from the plane update @@ -7576,3 +8170,92 @@ update: freesync_capable); } +static void amdgpu_dm_set_psr_caps(struct dc_link *link) +{ + uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return; + if (link->type == dc_connection_none) + return; + if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + dpcd_data, sizeof(dpcd_data))) { + link->psr_feature_enabled = dpcd_data[0] ? 
true:false; + DRM_INFO("PSR support:%d\n", link->psr_feature_enabled); + } +} + +/* + * amdgpu_dm_link_setup_psr() - configure psr link + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) +{ + struct dc_link *link = NULL; + struct psr_config psr_config = {0}; + struct psr_context psr_context = {0}; + struct dc *dc = NULL; + bool ret = false; + + if (stream == NULL) + return false; + + link = stream->link; + dc = link->ctx->dc; + + psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version; + + if (psr_config.psr_version > 0) { + psr_config.psr_exit_link_training_required = 0x1; + psr_config.psr_frame_capture_indication_req = 0; + psr_config.psr_rfb_setup_time = 0x37; + psr_config.psr_sdp_transmit_line_num_deadline = 0x20; + psr_config.allow_smu_optimizations = 0x0; + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + + } + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled); + + return ret; +} + +/* + * amdgpu_dm_psr_enable() - enable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + struct dc_static_screen_events triggers = {0}; + + DRM_DEBUG_DRIVER("Enabling psr...\n"); + + triggers.cursor_update = true; + triggers.overlay_update = true; + triggers.surface_update = true; + + dc_stream_set_static_screen_events(link->ctx->dc, + &stream, 1, + &triggers); + + return dc_link_set_psr_allow_active(link, true, false); +} + +/* + * amdgpu_dm_psr_disable() - disable psr f/w + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +{ + + DRM_DEBUG_DRIVER("Disabling psr...\n"); + + return dc_link_set_psr_allow_active(stream->link, false, true); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c8c525a2b505..a8fc90a927d6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -57,6 +57,8 @@ struct amdgpu_device; struct drm_device; struct amdgpu_dm_irq_handler_data; struct dc; +struct amdgpu_bo; +struct dmub_srv; struct common_irq_params { struct amdgpu_device *adev; @@ -108,6 +110,12 @@ struct amdgpu_dm_backlight_caps { * @display_indexes_num: Max number of display streams supported * @irq_handler_list_table_lock: Synchronizes access to IRQ tables * @backlight_dev: Backlight control device + * @backlight_link: Link on which to control backlight + * @backlight_caps: Capabilities of the backlight device + * @freesync_module: Module handling freesync calculations + * @fw_dmcu: Reference to DMCU firmware + * @dmcu_fw_version: Version of the DMCU firmware + * @soc_bounding_box: SOC bounding box values provided by gpu_info FW * @cached_state: Caches device atomic state for suspend/resume * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info */ @@ -116,6 +124,50 @@ struct amdgpu_display_manager { struct dc *dc; /** + * @dmub_srv: + * + * DMUB service, used for controlling the DMUB on hardware + * that supports it. The pointer to the dmub_srv will be + * NULL on hardware that does not support it. + */ + struct dmub_srv *dmub_srv; + + /** + * @dmub_fw: + * + * DMUB firmware, required on hardware that has DMUB support. + */ + const struct firmware *dmub_fw; + + /** + * @dmub_bo: + * + * Buffer object for the DMUB. 
+ */ + struct amdgpu_bo *dmub_bo; + + /** + * @dmub_bo_gpu_addr: + * + * GPU virtual address for the DMUB buffer object. + */ + u64 dmub_bo_gpu_addr; + + /** + * @dmub_bo_cpu_addr: + * + * CPU address for the DMUB buffer object. + */ + void *dmub_bo_cpu_addr; + + /** + * @dmcub_fw_version: + * + * DMCUB firmware version. + */ + uint32_t dmcub_fw_version; + + /** * @cgs_device: * * The Common Graphics Services device. It provides an interface for @@ -128,7 +180,7 @@ struct amdgpu_display_manager { u16 display_indexes_num; /** - * @atomic_obj + * @atomic_obj: * * In combination with &dm_atomic_state it helps manage * global atomic state that doesn't map cleanly into existing @@ -225,6 +277,9 @@ struct amdgpu_display_manager { struct amdgpu_dm_backlight_caps backlight_caps; struct mod_freesync *freesync_module; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct hdcp_workqueue *hdcp_workqueue; +#endif struct drm_atomic_state *cached_state; @@ -232,13 +287,13 @@ struct amdgpu_display_manager { const struct firmware *fw_dmcu; uint32_t dmcu_fw_version; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** + * @soc_bounding_box: + * * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; struct amdgpu_dm_connector { @@ -287,6 +342,7 @@ struct amdgpu_dm_connector { uint32_t debugfs_dpcd_address; uint32_t debugfs_dpcd_size; #endif + bool force_yuv420_output; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -347,6 +403,8 @@ struct dm_connector_state { bool underscan_enable; bool freesync_capable; uint8_t abm_level; + int vcpi_slots; + uint64_t pbn; }; #define to_dm_connector_state(x)\ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b43bb7f90e4e..2233d293a707 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func, res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL); + dc_gamma_release(&gamma); + return res ? 
0 : -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index a549c7c717dd..eaad9099bc0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, } /* Configure dithering */ - if (!dm_need_crc_dither(source)) + if (!dm_need_crc_dither(source)) { dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); - else + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_DISABLE); + } else { dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_AUTO); + } unlock: mutex_unlock(&adev->dm.dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f3dfb2887ae0..f81d3439ee8c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -657,6 +657,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us dc_link_set_test_pattern( link, test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_RGB, &link_training_settings, custom_pattern, 10); @@ -942,6 +943,52 @@ static const struct { {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops} }; +/* + * Force YUV420 output if available from the given mode + */ +static int force_yuv420_output_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + + connector->force_yuv420_output = (bool)val; + + return 0; +} + +/* + * Check if YUV420 is forced when available from the given mode + */ +static int force_yuv420_output_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + + *val = connector->force_yuv420_output; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, + force_yuv420_output_set, "%llu\n"); + +/* + * Read PSR state + */ +static int psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + uint32_t psr_state = 0; + + dc_link_get_psr_state(link, &psr_state); + + *val = psr_state; + + return 0; +} + + +DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -955,6 +1002,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) dp_debugfs_entries[i].fops); } } + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); + + debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, + &force_yuv420_output_fops); + } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c new file mode 100644 index 000000000000..ae329335dfcc --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -0,0 +1,367 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "amdgpu_dm_hdcp.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dm_helpers.h" +#include <drm/drm_hdcp.h> + +static bool +lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + + struct dc_link *link = handle; + struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); +} + +static bool +lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); +} + +static void process_output(struct hdcp_workqueue *hdcp_work) +{ + struct mod_hdcp_output output = hdcp_work->output; + + if (output.callback_stop) + cancel_delayed_work(&hdcp_work->callback_dwork); + + if (output.callback_needed) + schedule_delayed_work(&hdcp_work->callback_dwork, + msecs_to_jiffies(output.callback_delay)); + + if (output.watchdog_timer_stop) + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + if (output.watchdog_timer_needed) + schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, + msecs_to_jiffies(output.watchdog_timer_delay)); + + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0)); +} + +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector, + uint8_t content_type, + bool enable_encryption) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + struct mod_hdcp_display_query query; + + mutex_lock(&hdcp_w->mutex); + 
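/*
 * lp_read_i2c() above uses the standard I2C register-read shape: a
 * one-byte write payload carrying the offset, then a read payload for
 * the data, submitted as one two-payload i2c_command so both legs run
 * as a single transaction. The same convention expressed against the
 * core i2c API, purely for illustration (the HDCP module itself always
 * goes through dm_helpers_submit_i2c(), never i2c_transfer()):
 */
static int example_register_read(struct i2c_adapter *adap, u16 addr,
				 u8 offset, u8 *buf, u16 len)
{
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = addr, .flags = I2C_M_RD, .len = len, .buf = buf },
	};

	/* i2c_transfer() returns the number of messages completed. */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}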
hdcp_w->aconnector = aconnector; + + query.display = NULL; + mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); + + if (query.display != NULL) { + memcpy(display, query.display, sizeof(struct mod_hdcp_display)); + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + + if (enable_encryption) { + display->adjust.disable = 0; + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) + hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; + + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display->adjust.disable = 1; + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); + } + + display->state = MOD_HDCP_DISPLAY_ACTIVE; + } + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + schedule_work(&hdcp_w->cpirq_work); +} + + + + +static void event_callback(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + + +} +static void event_property_update(struct work_struct *work) +{ + + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct drm_device *dev = hdcp_work->aconnector->base.dev; + long ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + + if (aconnector->base.state->commit) { + ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + + if (ret == 0) { + DRM_ERROR("HDCP state unknown! 
Setting it to DESIRED"); + hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + + if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && + hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && + hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + } else { + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + } + + + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void event_property_validate(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = + container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); + struct mod_hdcp_display_query query; + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + + if (!aconnector) + return; + + mutex_lock(&hdcp_work->mutex); + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + + if (query.encryption_status != hdcp_work->encryption_status) { + hdcp_work->encryption_status = query.encryption_status; + schedule_work(&hdcp_work->property_update_work); + } + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_watchdog_timer(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), + struct hdcp_workqueue, + watchdog_timer_dwork); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + +static void event_cpirq(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + + +void hdcp_destroy(struct hdcp_workqueue *hdcp_work) +{ + int i = 0; + + for (i = 0; i < hdcp_work->max_link; i++) { + cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); + cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + } + + kfree(hdcp_work); + +} + +static void update_config(void *handle, struct cp_psp_stream_config *config) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + + if (aconnector->dc_sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + + display->controller = CONTROLLER_ID_D0 + config->otg_inst; + display->dig_fe = config->stream_enc_inst; + link->dig_be = config->link_enc_inst; + link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; + 
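/*
 * hdcp_work is one slot per DC link, always indexed by link_index, and
 * every access to a slot is serialized on its mutex. Any other consumer
 * holding the opaque handle would reach per-link state the same way
 * update_config() does here; a hedged usage sketch (this helper is
 * illustrative, not part of the patch):
 */
static bool example_link_encrypted(struct hdcp_workqueue *hdcp_work,
				   unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	bool on;

	mutex_lock(&hdcp_w->mutex);
	on = hdcp_w->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
	mutex_unlock(&hdcp_w->mutex);

	return on;
}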
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + display->adjust.disable = 1; + link->adjust.auth_delay = 2; + + hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false); +} + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) +{ + + int max_caps = dc->caps.max_links; + struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); + int i = 0; + + if (hdcp_work == NULL) + goto fail_alloc_context; + + hdcp_work->max_link = max_caps; + + for (i = 0; i < max_caps; i++) { + + mutex_init(&hdcp_work[i].mutex); + + INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); + INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); + INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); + INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); + INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); + + hdcp_work[i].hdcp.config.psp.handle = psp_context; + hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); + hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; + hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + } + + cp_psp->funcs.update_stream_config = update_config; + cp_psp->handle = hdcp_work; + + return hdcp_work; + +fail_alloc_context: + kfree(hdcp_work); + + return NULL; + + + +} + + + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h new file mode 100644 index 000000000000..6abde86bce4a --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -0,0 +1,69 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_ +#define AMDGPU_DM_AMDGPU_DM_HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp.h" +#include "dc.h" +#include "dm_cp_psp.h" + +struct mod_hdcp; +struct mod_hdcp_link; +struct mod_hdcp_display; +struct cp_psp; + +struct hdcp_workqueue { + struct work_struct cpirq_work; + struct work_struct property_update_work; + struct delayed_work callback_dwork; + struct delayed_work watchdog_timer_dwork; + struct delayed_work property_validate_dwork; + struct amdgpu_dm_connector *aconnector; + struct mutex mutex; + + struct mod_hdcp hdcp; + struct mod_hdcp_output output; + struct mod_hdcp_display display; + struct mod_hdcp_link link; + + enum mod_hdcp_encryption_status encryption_status; + uint8_t max_link; +}; + +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector, + uint8_t content_type, + bool enable_encryption); + +void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_destroy(struct hdcp_workqueue *work); + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc); + +#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ee1dc75f5ddc..66f266a5e10b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -97,11 +97,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( (struct edid *) edid->raw_edid); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); - if (sad_count <= 0) { - DRM_INFO("SADs count is: %d, don't need to read it\n", - sad_count); + if (sad_count <= 0) return result; - } edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? 
sad_count : DC_MAX_AUDIO_DESC_COUNT; for (i = 0; i < edid_caps->audio_mode_count; ++i) { @@ -183,19 +180,22 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( bool enable) { struct amdgpu_dm_connector *aconnector; + struct dm_connector_state *dm_conn_state; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int slots = 0; bool ret; - int clock; - int bpp = 0; - int pbn = 0; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + /* Accessing the connector state is required for vcpi_slots allocation + * and relies directly on the commit-check behaviour + * that blocks before commit, guaranteeing that the state + * will not be swapped while still in use in commit tail */ if (!aconnector || !aconnector->mst_port) return false; + dm_conn_state = to_dm_connector_state(aconnector->base.state); + mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) @@ -204,42 +204,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( mst_port = aconnector->port; if (enable) { - clock = stream->timing.pix_clk_100hz / 10; - - switch (stream->timing.display_color_depth) { - - case COLOR_DEPTH_666: - bpp = 6; - break; - case COLOR_DEPTH_888: - bpp = 8; - break; - case COLOR_DEPTH_101010: - bpp = 10; - break; - case COLOR_DEPTH_121212: - bpp = 12; - break; - case COLOR_DEPTH_141414: - bpp = 14; - break; - case COLOR_DEPTH_161616: - bpp = 16; - break; - default: - ASSERT(bpp != 0); - break; - } - - bpp = bpp * 3; - - /* TODO need to know link rate */ - - pbn = drm_dp_calc_pbn_mode(clock, bpp); - - slots = drm_dp_find_vcpi_slots(mst_mgr, pbn); - ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots); + ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, + dm_conn_state->pbn, + dm_conn_state->vcpi_slots); if (!ret) return false; @@ -282,7 +250,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( * Polls for ACT (allocation change trigger) handled and sends * ALLOCATE_PAYLOAD message.
*/ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream) { @@ -293,19 +261,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) - return false; + return ACT_FAILED; mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) - return false; + return ACT_FAILED; ret = drm_dp_check_act_status(mst_mgr); if (ret) - return false; + return ACT_FAILED; - return true; + return ACT_SUCCESS; } bool dm_helpers_dp_mst_send_payload_allocation( @@ -541,7 +509,6 @@ bool dm_helpers_submit_i2c( return result; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, @@ -552,7 +519,6 @@ bool dm_helpers_dp_write_dsc_enable( return dm_helpers_dp_write_dpcd(ctx, stream->sink->link, DP_DSC_ENABLE, &enable_dsc, 1); } -#endif bool dm_helpers_is_dp_sink_present(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index fa5d503d379c..64445c4cc4c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); @@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) true); } } + drm_connector_list_iter_end(&iter); } /** @@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; @@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) false); } } + drm_connector_list_iter_end(&iter); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5ec14efd4d8c..81367c869134 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,7 +36,9 @@ #include "dc_link_ddc.h" #include "i2caux_interface.h" - +#if defined(CONFIG_DEBUG_FS) +#include "amdgpu_dm_debugfs.h" +#endif /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ -113,6 +115,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = -EIO; break; case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: result = -EBUSY; break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: @@ -123,31 +126,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, return result; } -static enum drm_connector_status -dm_dp_mst_detect(struct drm_connector *connector, bool 
force) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - - enum drm_connector_status status = - drm_dp_mst_detect_port( - connector, - &master->mst_mgr, - aconnector->port); - - return status; -} - static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; - if (amdgpu_dm_connector->edid) { - kfree(amdgpu_dm_connector->edid); - amdgpu_dm_connector->edid = NULL; - } + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); @@ -163,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); struct drm_dp_mst_port *port = amdgpu_dm_connector->port; +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(amdgpu_dm_connector); + amdgpu_dm_connector->debugfs_dpcd_address = 0; + amdgpu_dm_connector->debugfs_dpcd_size = 0; +#endif + return drm_dp_mst_connector_late_register(connector, port); } @@ -177,7 +169,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - .detect = dm_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, @@ -245,17 +236,61 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +static struct drm_encoder * +dm_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *connector_state) { - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + return &to_amdgpu_dm_connector(connector)->mst_encoder->base; +} - return &amdgpu_dm_connector->mst_encoder->base; +static int +dm_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *master = aconnector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); +} + +static int dm_dp_mst_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_connector_state *new_conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(state, connector); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_crtc_state *new_crtc_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + + mst_port = aconnector->port; + mst_mgr = &aconnector->mst_port->mst_mgr; + + if (!old_conn_state->crtc) + return 0; + + if (new_conn_state->crtc) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + if (!new_crtc_state || + !drm_atomic_crtc_needs_modeset(new_crtc_state) || + new_crtc_state->enable) + return 0; + } + + return drm_dp_atomic_release_vcpi_slots(state, + mst_mgr, + mst_port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, - .best_encoder = dm_mst_best_encoder, + 
.atomic_best_encoder = dm_mst_atomic_best_encoder, + .detect_ctx = dm_dp_mst_detect, + .atomic_check = dm_dp_mst_atomic_check, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -417,6 +452,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_dp_aux_register(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, &aconnector->base); + + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + return; + aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index f4cfa0caeba8..a2e1a73f66b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -342,10 +342,11 @@ bool dm_pp_get_clock_levels_by_type( if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) { if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clks)) { - /* Error in pplib. Provide default values. */ + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); return true; } - } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) { if (smu_get_clock_by_type(&adev->smu, dc_to_pp_clock_type(clk_type), &pp_clks)) { @@ -365,7 +366,7 @@ bool dm_pp_get_clock_levels_by_type( validation_clks.memory_max_clock = 80000; validation_clks.level = 0; } - } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) { if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) { DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); validation_clks.engine_max_clock = 72000; @@ -506,8 +507,8 @@ bool dm_pp_apply_clock_for_voltage_request( ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); - else if (adev->smu.funcs && - adev->smu.funcs->display_clock_voltage_request) + else if (adev->smu.ppt_funcs && + adev->smu.ppt_funcs->display_clock_voltage_request) ret = smu_display_clock_voltage_request(&adev->smu, &pp_clock_request); if (ret) @@ -527,7 +528,7 @@ bool dm_pp_get_static_clocks( ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); if (ret) return false; @@ -589,10 +590,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); - else if (adev->smu.funcs && - adev->smu.funcs->set_watermarks_for_clock_ranges) + else smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges); + &wm_with_clock_ranges); } void pp_rv_set_pme_wa_enable(struct pp_smu *pp) @@ -604,7 +604,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp) if (pp_funcs && pp_funcs->notify_smu_enable_pwe) pp_funcs->notify_smu_enable_pwe(pp_handle); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) smu_notify_smu_enable_pwe(&adev->smu); } @@ -665,7 +665,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - struct 
smu_context *smu = &adev->smu; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; @@ -708,15 +707,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - if (!smu->funcs) - return PP_SMU_RESULT_UNSUPPORTED; - - /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL; - * 1: fail - */ - if (smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges)) - return PP_SMU_RESULT_UNSUPPORTED; + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); return PP_SMU_RESULT_OK; } @@ -727,10 +718,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */ if (smu_set_azalia_d3_pme(smu)) return PP_SMU_RESULT_FAIL; @@ -743,10 +734,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ if (smu_set_display_count(smu, count)) return PP_SMU_RESULT_FAIL; @@ -759,10 +750,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */ + /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ if (smu_set_deep_sleep_dcefclk(smu, mhz)) return PP_SMU_RESULT_FAIL; @@ -777,13 +768,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -799,13 +790,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -835,7 +826,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; switch (clock_id) { @@ -853,7 +844,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, } 
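/*
 * Every pp_nv_* callback converted in this hunk follows one shape after
 * the smu->funcs -> smu->ppt_funcs rename: return UNSUPPORTED when no
 * ppt_funcs table is bound, forward the request through its smu_*()
 * wrapper, and fold the wrapper's zero/non-zero result into the pp_smu
 * status space. Condensed into a single helper for illustration
 * (forward_to_smu() and its request callback are not part of the patch):
 */
static enum pp_smu_status forward_to_smu(struct smu_context *smu,
					 int (*request)(struct smu_context *smu))
{
	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* smu_*() wrappers return 0 on success (or when the hook is absent). */
	return request(smu) ? PP_SMU_RESULT_FAIL : PP_SMU_RESULT_OK;
}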
clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -869,13 +860,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc) + if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks)) + if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; @@ -894,13 +885,95 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, if (!smu->ppt_funcs->get_uclk_dpm_states) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->ppt_funcs->get_uclk_dpm_states(smu, + if (!smu_get_uclk_dpm_states(smu, clock_values_in_khz, num_states)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; } +enum pp_smu_status pp_rn_get_dpm_clock_table( + struct pp_smu *pp, struct dpm_clocks *clock_table) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_dpm_clock_table) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_dpm_clock_table(smu, clock_table)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = + wm_with_clock_ranges.wm_dmif_clocks_ranges; + struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = + wm_with_clock_ranges.wm_mcif_clocks_ranges; + int32_t i; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; + wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { + if (ranges->reader_wm_sets[i].wm_inst > 3) + wm_dce_clocks[i].wm_set_id = WM_SET_A; + else + wm_dce_clocks[i].wm_set_id = + ranges->reader_wm_sets[i].wm_inst; + + wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].min_drain_clk_mhz; + + wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].max_drain_clk_mhz; + + wm_dce_clocks[i].wm_min_mem_clk_in_khz = + ranges->reader_wm_sets[i].min_fill_clk_mhz; + + wm_dce_clocks[i].wm_max_mem_clk_in_khz = + ranges->reader_wm_sets[i].max_fill_clk_mhz; + } + + for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { + if (ranges->writer_wm_sets[i].wm_inst > 3) + wm_soc_clocks[i].wm_set_id = WM_SET_A; + else + wm_soc_clocks[i].wm_set_id = + ranges->writer_wm_sets[i].wm_inst; + wm_soc_clocks[i].wm_min_socclk_clk_in_khz = + ranges->writer_wm_sets[i].min_fill_clk_mhz; + + wm_soc_clocks[i].wm_max_socclk_clk_in_khz = + ranges->writer_wm_sets[i].max_fill_clk_mhz; + + wm_soc_clocks[i].wm_min_mem_clk_in_khz = + ranges->writer_wm_sets[i].min_drain_clk_mhz; + + wm_soc_clocks[i].wm_max_mem_clk_in_khz = + 
ranges->writer_wm_sets[i].max_drain_clk_mhz; + } + + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); + + return PP_SMU_RESULT_OK; +} + void dm_pp_get_funcs( struct dc_context *ctx, struct pp_smu_funcs *funcs) @@ -921,7 +994,6 @@ void dm_pp_get_funcs( funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case DCN_VERSION_2_0: funcs->ctx.ver = PP_SMU_VER_NV; funcs->nv_funcs.pp_smu.dm = ctx; @@ -944,7 +1016,13 @@ void dm_pp_get_funcs( funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states; funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; -#endif + + case DCN_VERSION_2_1: + funcs->ctx.ver = PP_SMU_VER_RN; + funcs->rn_funcs.pp_smu.dm = ctx; + funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; + funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; + break; default: DRM_ERROR("smu version is not supported !\n"); break; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 627982cb15d2..6e3dddc73246 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -25,19 +25,10 @@ DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN DC_LIBS += dcn20 -endif - - -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_LIBS += dsc -endif - -ifdef CONFIG_DRM_AMD_DC_DCN1_0 DC_LIBS += dcn10 dml -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 DC_LIBS += dcn21 endif @@ -48,6 +39,10 @@ DC_LIBS += dce110 DC_LIBS += dce100 DC_LIBS += dce80 +ifdef CONFIG_DRM_AMD_DC_HDCP +DC_LIBS += hdcp +endif + AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) include $(AMD_DC) @@ -55,7 +50,7 @@ include $(AMD_DC) DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN DISPLAY_CORE += dc_vm_helper.o endif @@ -66,5 +61,6 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) - - +DC_DMUB += dc_dmub_srv.o +AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) +AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile index a50a76471107..7ad0cad0f4ef 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile @@ -25,7 +25,7 @@ # subcomponents. BASICS = conversion.o fixpt31_32.o \ - log_helpers.o vector.o + log_helpers.o vector.o dc_common.o AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c new file mode 100644 index 000000000000..b2fc4f8e6482 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dc_common.h" +#include "basics/conversion.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space) +{ + switch (output_color_space) { + case COLOR_SPACE_SRGB: + case COLOR_SPACE_SRGB_LIMITED: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_ADOBERGB: + return true; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR601_LIMITED: + case COLOR_SPACE_YCBCR709_LIMITED: + case COLOR_SPACE_2020_YCBCR: + return false; + default: + /* Add a case to switch */ + BREAK_TO_DEBUGGER(); + return false; + } +} + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) + return true; + return false; +} + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) + return true; + return false; +} + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) + return true; + if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) + return true; + if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) + return true; + return false; +} + +void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, + const struct dc_plane_state *plane_state) +{ + if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN + && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID + && plane_state->input_csc_color_matrix.enable_adjustment + && plane_state->coeff_reduction_factor.value != 0) { + bias_and_scale->scale_blue = fixed_point_to_int_frac( + dc_fixpt_mul(plane_state->coeff_reduction_factor, + dc_fixpt_from_fraction(256, 255)), + 2, + 13); + bias_and_scale->scale_red = bias_and_scale->scale_blue; + bias_and_scale->scale_green = bias_and_scale->scale_blue; + } else { + bias_and_scale->scale_blue = 0x2000; + bias_and_scale->scale_red = 0x2000; + bias_and_scale->scale_green = 0x2000; + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.h b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h new file mode 100644 index 000000000000..7c0cbf47e8ce --- 
/dev/null +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.h @@ -0,0 +1,42 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DC_COMMON_H__ +#define __DAL_DC_COMMON_H__ + +#include "core_types.h" + +bool is_rgb_cspace(enum dc_color_space output_color_space); + +bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); + +void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, + const struct dc_plane_state *plane_state); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 221e0f56389f..008d4d11339d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -111,7 +111,7 @@ struct dc_bios *bios_parser_create( return NULL; } -static void destruct(struct bios_parser *bp) +static void bios_parser_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void bios_parser_destroy(struct dc_bios **dcb) return; } - destruct(bp); + bios_parser_destruct(bp); kfree(bp); *dcb = NULL; @@ -2189,7 +2189,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) break; default: break; - }; + } /* Unidentified device ID, return empty support mask. 
*/ return 0; @@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( /* Sort voltage table from low to high*/ if (result == BP_RESULT_OK) { - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } @@ -2742,7 +2739,6 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; enum bp_result record_result; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -2751,7 +2747,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index dff65c0fe82f..2f1c9584ac32 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -111,7 +111,7 @@ static struct atom_encoder_caps_record *get_encoder_cap_record( #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table) -static void destruct(struct bios_parser *bp) +static void bios_parser2_destruct(struct bios_parser *bp) { kfree(bp->base.bios_local_image); kfree(bp->base.integrated_info); @@ -126,7 +126,7 @@ static void firmware_parser_destroy(struct dc_bios **dcb) return; } - destruct(bp); + bios_parser2_destruct(bp); kfree(bp); *dcb = NULL; @@ -294,11 +294,21 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb, struct atom_display_object_path_v2 *object; struct atom_common_record_header *header; struct atom_i2c_record *record; + struct atom_i2c_record dummy_record = {0}; struct bios_parser *bp = BP_FROM_DCB(dcb); if (!info) return BP_RESULT_BADINPUT; + if (id.type == OBJECT_TYPE_GENERIC) { + dummy_record.i2c_id = id.id; + + if (get_gpio_i2c_info(bp, &dummy_record, info) == BP_RESULT_OK) + return BP_RESULT_OK; + else + return BP_RESULT_NORECORD; + } + object = get_bios_object(bp, id); if (!object) @@ -341,6 +351,7 @@ static enum bp_result get_gpio_i2c_info( struct atom_gpio_pin_lut_v2_1 *header; uint32_t count = 0; unsigned int table_index = 0; + bool find_valid = false; if (!info) return BP_RESULT_BADINPUT; @@ -368,33 +379,28 @@ static enum bp_result get_gpio_i2c_info( - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); - table_index = record->i2c_id & I2C_HW_LANE_MUX; - - if (count < table_index) { - bool find_valid = false; - - for (table_index = 0; table_index < count; table_index++) { - if (((record->i2c_id & I2C_HW_CAP) == ( - header->gpio_pin[table_index].gpio_id & - I2C_HW_CAP)) && - ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_ENGINE_ID_MASK)) && - ((record->i2c_id & I2C_HW_LANE_MUX) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_LANE_MUX))) { - /* still valid */ - find_valid = true; - break; - } + for (table_index = 0; table_index < count; table_index++) { + if (((record->i2c_id & I2C_HW_CAP) == ( + header->gpio_pin[table_index].gpio_id & + I2C_HW_CAP)) && + ((record->i2c_id & 
I2C_HW_ENGINE_ID_MASK) == + (header->gpio_pin[table_index].gpio_id & + I2C_HW_ENGINE_ID_MASK)) && + ((record->i2c_id & I2C_HW_LANE_MUX) == + (header->gpio_pin[table_index].gpio_id & + I2C_HW_LANE_MUX))) { + /* still valid */ + find_valid = true; + break; } - /* If we don't find the entry that we are looking for then - * we will return BP_Result_BadBiosTable. - */ - if (find_valid == false) - return BP_RESULT_BADBIOSTABLE; } + /* If we don't find the entry that we are looking for then + * we will return BP_Result_BadBiosTable. + */ + if (find_valid == false) + return BP_RESULT_BADBIOSTABLE; + /* get the GPIO_I2C_INFO */ info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false; info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX; @@ -828,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info( case 1: return get_ss_info_v4_1(bp, signal, index, ss_info); case 2: + case 3: return get_ss_info_v4_2(bp, signal, index, ss_info); default: break; @@ -986,7 +993,7 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) break; default: break; - }; + } /* Unidentified device ID, return empty support mask. */ return 0; @@ -1205,6 +1212,8 @@ static enum bp_result get_firmware_info_v3_1( bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; } + info->oem_i2c_present = false; + return BP_RESULT_OK; } @@ -1283,6 +1292,13 @@ static enum bp_result get_firmware_info_v3_2( bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; } + if (firmware_info->board_i2c_feature_id == 0x2) { + info->oem_i2c_present = true; + info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; + } else { + info->oem_i2c_present = false; + } + return BP_RESULT_OK; } @@ -1402,10 +1418,8 @@ static enum bp_result get_integrated_info_v11( info->ma_channel_number = info_v11->umachannelnumber; info->lvds_ss_percentage = le16_to_cpu(info_v11->lvds_ss_percentage); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 info->dp_ss_control = le16_to_cpu(info_v11->reserved1); -#endif info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v11->lvds_ss_rate_10hz); info->hdmi_ss_percentage = @@ -1613,8 +1627,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -1627,6 +1639,7 @@ static enum bp_result construct_integrated_info( /* Don't need to check major revision as they are all 1 */ switch (revision.minor) { case 11: + case 12: result = get_integrated_info_v11(bp, info); break; default: @@ -1644,10 +1657,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j-1].max_supported_clk ) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } @@ -1829,7 +1840,6 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; enum bp_result record_result; const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = { @@ -1838,7 +1848,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n"); return BP_RESULT_BADINPUT; @@ -1918,7 +1927,7 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_board_layout_info = bios_get_board_layout_info, }; -static bool bios_parser_construct( 
+static bool bios_parser2_construct( struct bios_parser *bp, struct bp_init_data *init, enum dce_version dce_version) @@ -2011,7 +2020,7 @@ struct dc_bios *firmware_parser_create( if (!bp) return NULL; - if (bios_parser_construct(bp, init, dce_version)) + if (bios_parser2_construct(bp, init, dce_version)) return &bp->base; kfree(bp); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index bb2e8105e6ab..2cb7a4288cb7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -37,6 +37,8 @@ #include "bios_parser_types_internal2.h" #include "amdgpu.h" +#include "dc_dmub_srv.h" +#include "dc.h" #define DC_LOGGER \ bp->base.ctx->logger @@ -103,6 +105,21 @@ static void init_dig_encoder_control(struct bios_parser *bp) } } +static void encoder_control_dmcub( + struct dc_dmub_srv *dmcub, + struct dig_encoder_stream_setup_parameters_v1_5 *dig) +{ + struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 }; + + encoder_control.header.type = DMUB_CMD__VBIOS; + encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL; + encoder_control.encoder_control.dig.stream_param = *dig; + + dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl) @@ -155,6 +172,12 @@ static enum bp_result encoder_control_digx_v1_5( break; } + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + encoder_control_dmcub(bp->base.ctx->dmub_srv, ¶ms); + return BP_RESULT_OK; + } + if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params)) result = BP_RESULT_OK; @@ -191,6 +214,22 @@ static void init_transmitter_control(struct bios_parser *bp) } } +static void transmitter_control_dmcub( + struct dc_dmub_srv *dmcub, + struct dig_transmitter_control_parameters_v1_6 *dig) +{ + struct dmub_rb_cmd_dig1_transmitter_control transmitter_control; + + transmitter_control.header.type = DMUB_CMD__VBIOS; + transmitter_control.header.sub_type = + DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL; + transmitter_control.transmitter_control.dig = *dig; + + dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl) @@ -222,6 +261,11 @@ static enum bp_result transmitter_control_v1_6( __func__, ps.param.symclk_10khz); } + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param); + return BP_RESULT_OK; + } /*color_depth not used any more, driver has deep color factor in the Phyclk*/ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps)) @@ -255,7 +299,20 @@ static void init_set_pixel_clock(struct bios_parser *bp) } } +static void set_pixel_clock_dmcub( + struct dc_dmub_srv *dmcub, + struct set_pixel_clock_parameter_v1_7 *clk) +{ + struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 }; + + pixel_clock.header.type = DMUB_CMD__VBIOS; + pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK; + pixel_clock.pixel_clock.clk = *clk; + dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, @@ -331,6 +388,12 @@ static 
enum bp_result set_pixel_clock_v7( if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN; + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk); + return BP_RESULT_OK; + } + if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk)) result = BP_RESULT_OK; } @@ -585,6 +648,21 @@ static void init_enable_disp_power_gating( } } +static void enable_disp_power_gating_dmcub( + struct dc_dmub_srv *dmcub, + struct enable_disp_power_gating_parameters_v2_1 *pwr) +{ + struct dmub_rb_cmd_enable_disp_power_gating power_gating; + + power_gating.header.type = DMUB_CMD__VBIOS; + power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; + power_gating.power_gating.pwr = *pwr; + + dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); +} + static enum bp_result enable_disp_power_gating_v2_1( struct bios_parser *bp, enum controller_id crtc_id, @@ -604,6 +682,13 @@ static enum bp_result enable_disp_power_gating_v2_1( ps.param.enable = bp->cmd_helper->disp_power_gating_action_to_atom(action); + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv, + &ps.param); + return BP_RESULT_OK; + } + if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param)) result = BP_RESULT_OK; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index db153ddf0fee..7388c987c595 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -55,23 +55,19 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCE_VERSION_11_22: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; -#endif case DCE_VERSION_12_0: case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 985633c08a26..d0714a3d63c8 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -24,15 +24,20 @@ # It calculates Bandwidth and Watermarks values for HW programming # -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +calcs_ccflags := -mhard-float -msse -calcs_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). 
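For context on the compiler-flag shuffle in this Makefile: the calcs code is built with -mhard-float -msse because it does double-precision bandwidth math, and, as the patch's own comment notes, GCC before 7.1 cannot compile double-using code at the kernel's default 8-byte stack boundary (-mpreferred-stack-boundary=3). Those older compilers therefore get -mpreferred-stack-boundary=4 (16-byte alignment) for this directory, while newer compilers keep the default and get -msse2 instead. A trivial compile check, with a hypothetical file and function name, shows the kind of code these flags exist for:

/* dcn_calc_example.c: build with the old-GCC fallback flags,
 *   gcc -c -mhard-float -msse -mpreferred-stack-boundary=4 dcn_calc_example.c
 * or with the newer-compiler variant,
 *   gcc -c -mhard-float -msse -msse2 dcn_calc_example.c
 * (x86 only; flag choice mirrors the Makefile logic above)
 */
double average_bandwidth_mbps(const double *samples, int n)
{
	double sum = 0.0;
	int i;

	for (i = 0; i < n; i++)
		sum += samples[i];	/* double-precision math in kernel context */

	return n ? sum / n : 0.0;
}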
+calcs_ccflags += -mpreferred-stack-boundary=4 +else calcs_ccflags += -msse2 endif @@ -42,7 +47,7 @@ CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautologi BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 9b2cb57bf2ba..a4ddd657598f 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -53,13 +53,9 @@ * remain as-is as it provides us with a guarantee from HW that it is correct. */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /* Defaults from spreadsheet rev#247. * RV2 delta: dram_clock_change_latency, max_num_dpp */ -#else -/* Defaults from spreadsheet rev#247 */ -#endif const struct dcn_soc_bounding_box dcn10_soc_defaults = { /* latencies */ .sr_exit_time = 17, /*us*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index b864869cc7e3..3cd283195091 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -63,7 +63,7 @@ CLK_MGR_DCE120 = dce120_clk_mgr.o AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120) -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN ############################################################################### # DCN10 ############################################################################### @@ -72,9 +72,7 @@ CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_0 ############################################################################### # DCN20 ############################################################################### @@ -83,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 ############################################################################### # DCN21 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index c43797bea413..6d60ef822619 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -37,9 +37,7 @@ #include "dcn10/rv1_clk_mgr.h" #include "dcn10/rv2_clk_mgr.h" #include "dcn20/dcn20_clk_mgr.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/rn_clk_mgr.h" -#endif int clk_mgr_helper_get_active_display_cnt( @@ -65,6 +63,31 @@ int clk_mgr_helper_get_active_display_cnt( return display_count; } +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (dc->hwss.exit_optimized_pwr_state) + dc->hwss.exit_optimized_pwr_state(dc, dc->current_state); + + if (edp_link) { + clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active; + dc_link_set_psr_allow_active(edp_link, false, false); + } + +} + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct 
dc_link *edp_link = get_edp_link(dc); + + if (edp_link) + dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false); + + if (dc->hwss.optimize_pwr_state) + dc->hwss.optimize_pwr_state(dc, dc->current_state); + +} struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg) { @@ -109,14 +132,19 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dce120_clk_mgr_construct(ctx, clk_mgr); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + if (ASICREV_IS_DALI(asic_id.hw_internal_rev)) { + /* TEMP: this check has to come before ASICREV_IS_RENOIR */ + /* which also incorrectly returns true for Dali */ + rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); + break; + } + if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; } -#endif /* DCN2_1 */ if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) { rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); break; @@ -127,13 +155,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p break; } break; -#endif /* Family RV */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case FAMILY_NV: dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); break; -#endif /* Family NV */ +#endif /* Family RV and NV*/ default: ASSERT(0); /* Unknown Asic */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index c5c8c4901eed..26db1c5d4e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) /* Calculate the current DFS clock, in kHz.*/ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -239,7 +239,7 @@ int dce_set_clock( /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 64); + clk_mgr_dce->base.dentist_vco_freq_khz / 64); /* Prepare to program display clock*/ pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; @@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce) int i; if (bp->integrated_info) - clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) { - clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) - clk_mgr_dce->dentist_vco_freq_khz = 3600000; + clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) { + clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) + clk_mgr_dce->base.dentist_vco_freq_khz = 3600000; } /*update the maximum display clock for each power state*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 7c746ef1e32e..d031bd3d3072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr_base->ctx->dc_bios; - struct dc *core_dc = clk_mgr_base->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr_base->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 62); + clk_mgr_dce->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; @@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) dmcu->funcs->set_psr_wait_loop(dmcu, @@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) { struct bp_set_dce_clock_parameters dce_clk_params; struct dc_bios *bp = clk_mgr->base.ctx->dc_bios; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; int actual_clock = requested_clk_khz; /* Prepare to program display clock*/ memset(&dce_clk_params, 0, sizeof(dce_clk_params)); @@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr->dentist_vco_freq_khz / 62); + clk_mgr->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; @@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_clock) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 47f529ce280a..3fab9296918a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, ASSERT(clk_mgr->pp_smu); + if (dc->work_arounds.skip_clock_update) + return; + pp_smu = &clk_mgr->pp_smu->rv_funcs; display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_ clk_mgr->base.dprefclk_khz = 600000; if (bp->integrated_info) - 
clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) { - clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) { + clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; } if (!debug->disable_dfs_bypass && bp->integrated_info) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 1897e91c8ccb..97b7f32294fd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param( @@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di /* Actual dispclk set is returned in the parameter register */ actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000; - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) dmcu->funcs->set_psr_wait_loop(dmcu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 3e8ac303bd52..25d7b7c6681c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, { int i; + clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { int dpp_inst, dppclk_khz; - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; + /* Loop index will match dpp->inst if resource exists, + * and we want to avoid dependency on dpp object + */ + dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + clk_mgr->dccg->funcs->update_dpp_dto( - clk_mgr->dccg, dpp_inst, dppclk_khz, false); + clk_mgr->dccg, dpp_inst, dppclk_khz); } } -static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) { int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; - - uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); - - REG_UPDATE(DENTIST_DISPCLK_CNTL, - DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); - REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); -} - -static void update_display_clk(struct 
clk_mgr_internal *clk_mgr, unsigned int khz) -{ + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz; int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider); REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); +// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100); + REG_UPDATE(DENTIST_DISPCLK_CNTL, + DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); } -static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dispclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = &dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dispclk_khz = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - - update_display_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); -} - -static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dppclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = &dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dppclk_khz = khz; - clk_mgr->dccg->ref_dppclk = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - - update_global_dpp_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); -} void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, @@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc *dc = clk_mgr_base->ctx->dc; struct pp_smu_funcs_nv *pp_smu = NULL; int display_count; + bool update_dppclk = false; bool update_dispclk = false; bool enter_display_off = false; + bool dpp_clock_lowered = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; - int i; if (dc->work_arounds.skip_clock_update) return; @@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; if (pp_smu && pp_smu->set_pstate_handshake_support) pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); } - clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; if 
(should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; @@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000); } - if (dc->config.forced_clocks == false) { - // First update display clock - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) - request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz); - - // Updating DPP clock requires some more logic - if (!safe_to_lower) { - // For pre-programming, we need to make sure any DPP clock that will go up has to go up + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + dpp_clock_lowered = true; + clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz; - // First raise the global reference if needed - if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - // Then raise any dividers that need raising - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; + update_dppclk = true; + } - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + update_dispclk = true; + } - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true); - } + if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { + if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dentist(clk_mgr); } else { - // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs - - if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); - - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; - - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false); - } + // if clock is being raised, increase refclk before lowering DTO + if (update_dppclk || update_dispclk) + dcn20_update_clocks_update_dentist(clk_mgr); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } } + if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { /*update dmcu for wait_loop count*/ @@ -320,6 +264,8 @@ void 
dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */ int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000; @@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; } - /* Both fclk and dppclk ref are run on the same scemi clock so we - * need to keep the same value for both + /* Both fclk and ref_dppclk run on the same scemi clock. + * So take the higher value since the DPP DTO is typically programmed + * such that max dppclk is 1:1 with ref_dppclk. */ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz) clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz; if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz) clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz; + // Both fclk and ref_dppclk run on the same scemi clock. + clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; + dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks); } @@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, } } +static bool dcn2_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->socclk_khz != b->socclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + else if (a->phyclk_khz != b->phyclk_khz) + return false; + else if (a->dramclk_khz != b->dramclk_khz) + return false; + else if (a->p_state_change_support != b->p_state_change_support) + return false; + + return true; +} + static struct clk_mgr_funcs dcn2_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dcn2_update_clocks, .init_clocks = dcn2_init_clocks, .enable_pme_wa = dcn2_enable_pme_wa, .get_clock = dcn2_get_clock, + .are_clock_states_equal = dcn2_are_clock_states_equal, }; @@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3850000; + clk_mgr->base.dentist_vco_freq_khz = 3850000; } else { /* DFS Slice 2 should be used for DPREFCLK */ @@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct( pll_req = dc_fixpt_mul_int(pll_req, 100000); /* integer part is now VCO frequency in kHz */ - clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3850000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3850000; /* Calculate the DPREFCLK in kHz.*/ clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; } //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. 
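Most of the churn in dcn20_clk_mgr.c above replaces the per-direction voltage-request helpers with a single ordering rule inside dcn2_update_clocks(): when the DPP clock is being lowered, the DTO dividers are raised before the DENTIST reference clock drops; when it is being raised, the reference clock goes up before the DTOs are retuned, so the per-pipe DPP clock never dips below what the pipes need mid-transition. A toy model of that rule follows; program_dto() and program_dentist() are stand-ins for dcn20_update_clocks_update_dpp_dto() and dcn20_update_clocks_update_dentist(), not the driver functions themselves:

#include <stdbool.h>

static void program_dto(int dppclk_khz)
{
	/* stand-in: reprogram each pipe's DPP DTO against the new refclk */
	(void)dppclk_khz;
}

static void program_dentist(int dppclk_khz)
{
	/* stand-in: write DENTIST_DPPCLK_WDIVIDER and wait for CHG_DONE */
	(void)dppclk_khz;
}

void update_dppclk(int *cur_khz, int new_khz)
{
	bool lowering = new_khz < *cur_khz;

	*cur_khz = new_khz;

	if (lowering) {
		program_dto(new_khz);		/* increase DTO divide first... */
		program_dentist(new_khz);	/* ...then drop the refclk */
	} else {
		program_dentist(new_khz);	/* raise the refclk first... */
		program_dto(new_khz);		/* ...then retune the DTOs */
	}
}

The rn_update_clocks() path in the Renoir hunks that follow applies the same lowered-vs-raised ordering, with rn_vbios_smu_set_dppclk() in place of the DENTIST write.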
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index ac31a9787305..c9fd824f3c23 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 787f94d815f4..de51ef12e33a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -52,6 +52,45 @@ #define REG(reg_name) \ (CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */ +int rn_get_active_display_cnt_wa( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + bool hdmi_present = false; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + hdmi_present = true; + } + + for (i = 0; i < dc->link_count; i++) { + const struct dc_link *link = dc->links[i]; + + /* + * Only notify active stream or virtual stream. + * Need to notify virtual stream to work around + * headless case. HPD does not fire when system is in + * S0i2. + */ + /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL || + link->link_enc->funcs->is_dig_enabled(link->link_enc)) + display_count++; + } + + /* WA for hang on HDMI after display off back back on*/ + if (display_count == 0 && hdmi_present) + display_count = 1; + + return display_count; +} + void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, int display_count; bool update_dppclk = false; bool update_dispclk = false; - bool enter_display_off = false; bool dpp_clock_lowered = false; - struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - display_count = clk_mgr_helper_get_active_display_cnt(dc, context); + struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - if (display_count == 0) - enter_display_off = true; + if (dc->work_arounds.skip_clock_update) + return; - if (enter_display_off == safe_to_lower) { - rn_vbios_smu_set_display_count(clk_mgr, display_count); + /* + * if it is safe to lower, but we are already in the lower state, we don't have to do anything + * also if safe to lower is false, we just go in the higher state + */ + if (safe_to_lower) { + /* check that we're not already in lower */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { + + display_count = rn_get_active_display_cnt_wa(dc, context); + /* if we can go lower, go lower */ + if (display_count == 0) { + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; + } + } + } else { + /* check that we're not already in D0 */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE); + /* 
update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; + } } if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) { @@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); - if (update_dppclk) + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } @@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) rn_dump_clk_registers(&sb, clk_mgr_base, &log_info); - s->dprefclk_khz = sb.dprefclk; + s->dprefclk_khz = sb.dprefclk * 1000; } void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) @@ -329,81 +388,14 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) rn_vbios_smu_enable_pme_wa(clk_mgr); } -static struct clk_mgr_funcs dcn21_funcs = { - .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, - .update_clocks = rn_update_clocks, - .init_clocks = dcn2_init_clocks, - .enable_pme_wa = rn_enable_pme_wa, - /* .dump_clk_registers = rn_dump_clk_registers */ -}; - -struct clk_bw_params rn_bw_params = { - .vram_type = Ddr4MemType, - .num_channels = 1, - .clk_table = { - .entries = { - { - .voltage = 0, - .dcfclk_mhz = 400, - .fclk_mhz = 400, - .memclk_mhz = 800, - .socclk_mhz = 0, - }, - { - .voltage = 0, - .dcfclk_mhz = 483, - .fclk_mhz = 800, - .memclk_mhz = 1600, - .socclk_mhz = 0, - }, - { - .voltage = 0, - .dcfclk_mhz = 602, - .fclk_mhz = 1067, - .memclk_mhz = 1067, - .socclk_mhz = 0, - }, - { - .voltage = 0, - .dcfclk_mhz = 738, - .fclk_mhz = 1333, - .memclk_mhz = 1600, - .socclk_mhz = 0, - }, - }, - - .num_entries = 4, - }, - - .wm_table = { - .entries = { - { - .wm_inst = WM_A, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - { - .wm_inst = WM_B, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - { - .wm_inst = WM_C, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - { - .wm_inst = WM_D, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 23.84, - .valid = true, - }, - }, - } -}; +void rn_init_clocks(struct clk_mgr *clk_mgr) +{ + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; +} void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) { @@ -462,31 +454,215 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra } -void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug; + struct pp_smu_wm_range_sets ranges = {0}; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu; + + if (!debug->disable_pplib_wm_range) { + build_watermark_ranges(clk_mgr_base->bw_params, &ranges); + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) + 
pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + } + +} + +static bool rn_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + + return true; +} + + +static struct clk_mgr_funcs dcn21_funcs = { + .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .update_clocks = rn_update_clocks, + .init_clocks = rn_init_clocks, + .enable_pme_wa = rn_enable_pme_wa, + .are_clock_states_equal = rn_are_clock_states_equal, + .notify_wm_ranges = rn_notify_wm_ranges +}; + +struct clk_bw_params rn_bw_params = { + .vram_type = Ddr4MemType, + .num_channels = 1, + .clk_table = { + .entries = { + { + .voltage = 0, + .dcfclk_mhz = 400, + .fclk_mhz = 400, + .memclk_mhz = 800, + .socclk_mhz = 0, + }, + { + .voltage = 0, + .dcfclk_mhz = 483, + .fclk_mhz = 800, + .memclk_mhz = 1600, + .socclk_mhz = 0, + }, + { + .voltage = 0, + .dcfclk_mhz = 602, + .fclk_mhz = 1067, + .memclk_mhz = 1067, + .socclk_mhz = 0, + }, + { + .voltage = 0, + .dcfclk_mhz = 738, + .fclk_mhz = 1333, + .memclk_mhz = 1600, + .socclk_mhz = 0, + }, + }, + + .num_entries = 4, + }, + +}; + +struct wm_table ddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 6.09, + .sr_enter_plus_exit_time_us = 7.14, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + } +}; + +struct wm_table lpddr4_wm_table = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 5.32, + .sr_enter_plus_exit_time_us = 6.38, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.82, + .sr_enter_plus_exit_time_us = 11.196, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.89, + .sr_enter_plus_exit_time_us = 11.24, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.748, + .sr_enter_plus_exit_time_us = 11.102, + .valid = true, + }, + } +}; + + +static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { int i; + for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) { + if (clock_table->DcfClocks[i].Vol == voltage) + return clock_table->DcfClocks[i].Freq; + } + + ASSERT(0); + return 0; +} + +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info) +{ + int i, j = 0; + + j = -1; + ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); - for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) { - if (clock_table->FClocks[i].Freq == 0) + /* Find lowest DPM, FCLK is filled in reverse order*/ + + 
for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { + if (clock_table->FClocks[i].Freq != 0) { + j = i; break; + } + } + + if (j == -1) { + /* clock table is all 0s, just use our own hardcode */ + ASSERT(0); + return; + } + + bw_params->clk_table.num_entries = j + 1; - bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq; - bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq; - bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq; - bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq; - bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol; + for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq; + bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol; + bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); } - bw_params->clk_table.num_entries = i; - bw_params->vram_type = asic_id->vram_type; - bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; + bw_params->vram_type = bios_info->memory_type; + bw_params->num_channels = bios_info->ma_channel_number; for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; - if (clock_table->FClocks[i].Freq == 0) { + if (i >= bw_params->clk_table.num_entries) { bw_params->wm_table.entries[i].valid = false; continue; } @@ -515,7 +691,6 @@ void rn_clk_mgr_construct( { struct dc_debug_options *debug = &ctx->dc->debug; struct dpm_clocks clock_table = { 0 }; - struct clk_state_registers_and_bypass s = { 0 }; clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; @@ -534,57 +709,42 @@ void rn_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3600000; - clk_mgr->base.dprefclk_khz = 600000; + clk_mgr->base.dentist_vco_freq_khz = 3600000; } else { struct clk_log_info log_info = {0}; /* TODO: Check we get what we expect during bringup */ - clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; - - rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); - clk_mgr->base.dprefclk_khz = s.dprefclk; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; - if (clk_mgr->base.dprefclk_khz != 600000) { - clk_mgr->base.dprefclk_khz = 600000; - ASSERT(1); //TODO: Renoir follow up. 
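The rn_clk_mgr_helper_populate_bw_params() rewrite above hinges on one detail: per its comment, the SMU fills the FCLK DPM table highest level first with unused tail slots left at Freq == 0, so the lowest valid level is the last nonzero entry found scanning from the top index down. Below is a minimal standalone sketch of that walk; struct dpm_entry, NUM_LEVELS, and the sample frequencies are illustrative stand-ins, not the driver's types. The clk_mgr hunk resumes after the sketch.

#include <assert.h>
#include <stdio.h>

#define NUM_LEVELS 4 /* stand-in for PP_SMU_NUM_FCLK_DPM_LEVELS */

struct dpm_entry { unsigned int freq_mhz; unsigned int voltage; };

/* Index of the lowest valid DPM level, or -1 if the table is all zeros. */
static int lowest_valid_level(const struct dpm_entry fclk[NUM_LEVELS])
{
        int i;

        /* Filled from index 0 down in descending clock order and
         * zero-padded at the tail, so scan from the top index down. */
        for (i = NUM_LEVELS - 1; i >= 0; i--)
                if (fclk[i].freq_mhz != 0)
                        return i;
        return -1;
}

int main(void)
{
        /* Illustrative table: three valid levels, one unused tail slot. */
        const struct dpm_entry fclk[NUM_LEVELS] = {
                { 1333, 4 }, { 1067, 3 }, { 800, 2 }, { 0, 0 },
        };
        int j = lowest_valid_level(fclk);

        assert(j == 2); /* the driver instead falls back to hardcoded defaults on -1 */
        /* The driver sets num_entries = j + 1, then copies entries j..0 so
         * its own table ends up in ascending clock order, pairing each
         * level's voltage with a DCFCLK via a table lookup. */
        printf("num_entries = %d\n", j + 1);
        return 0;
}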
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { + rn_bw_params.wm_table = lpddr4_wm_table; + } else { + rn_bw_params.wm_table = ddr4_wm_table; } - - /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dprefclk_khz == 0) - clk_mgr->base.dprefclk_khz = 600000; + /* Saved clocks configured at boot for debug purposes */ + rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); } + clk_mgr->base.dprefclk_khz = 600000; dce_clock_read_ss_info(clk_mgr); + clk_mgr->base.bw_params = &rn_bw_params; - if (pp_smu) { + if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); + if (ctx->dc_bios && ctx->dc_bios->integrated_info) { + rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); + } } - /* - * Notify SMU which set of WM should be selected for different ranges of fclk - * On Renoir there is a maximumum of 4 DF pstates supported, could be less - * depending on DDR speed and fused maximum fclk. - */ - if (!debug->disable_pplib_wm_range) { - struct pp_smu_wm_range_sets ranges = {0}; - - build_watermark_ranges(clk_mgr->base.bw_params, &ranges); - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) - pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { + /* enable powerfeatures when displaycount goes to 0 */ + rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); } - - /* enable powerfeatures when displaycount goes to 0 */ - if (!debug->disable_48mhz_pwrdwn) - rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h index aadec06fde10..e4322fa5475b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h @@ -26,11 +26,13 @@ #ifndef __RN_CLK_MGR_H__ #define __RN_CLK_MGR_H__ +#include "clk_mgr.h" +#include "dm_pp_smu.h" + struct rn_clk_registers { uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */ }; - void rn_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 50984c1811bb..6878aedf1d3e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,7 +33,7 @@ #include "mp/mp_12_0_0_sh_mask.h" #define REG(reg_name) \ - (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) #define FN(reg_name, field) \ FD(reg_name##__##field) @@ -82,20 +82,16 @@ int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr) int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz) { int actual_dispclk_set_mhz = -1; - struct dc *core_dc = clk_mgr->base.ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; - uint32_t clk = requested_dispclk_khz / 1000; - - if (clk <= 100) - clk = 101; + struct 
dc *dc = clk_mgr->base.ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDispclkFreq, - clk); + requested_dispclk_khz / 1000); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz) dmcu->funcs->set_psr_wait_loop(dmcu, @@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque { int actual_dcfclk_set_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_dcfclk_set_mhz; actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int { int actual_min_ds_dcfclk_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_min_ds_dcfclk_mhz; actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param( @@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_ { int actual_dppclk_set_mhz = -1; - uint32_t clk = requested_dpp_khz / 1000; - - if (clk <= 100) - clk = 101; - actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, - clk); + requested_dpp_khz / 1000); return actual_dppclk_set_mhz * 1000; } -void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count) +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state) { + int disp_count; + + if (state == DCN_PWR_STATE_LOW_POWER) + disp_count = 0; + else + disp_count = 1; + rn_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDisplayCount, - display_count); + clk_mgr, + VBIOSSMC_MSG_SetDisplayCount, + disp_count); } -void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr) +void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) { rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, - 0); + enable); } void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index da3a49487c6d..ccc01879c9d4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz); -void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count); -void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr); +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count); +void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */ diff --git 
a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5d1adeda4d90..39fe38cb39b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -58,21 +58,18 @@ #include "hubp.h" #include "dc_link_dp.h" +#include "dc_dmub_srv.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "vm_helper.h" -#endif #include "dce/dce_i2c.h" #define DC_LOGGER \ dc->ctx->logger -const static char DC_BUILD_ID[] = "production-build"; +static const char DC_BUILD_ID[] = "production-build"; /** * DOC: Overview @@ -194,7 +191,7 @@ static bool create_links( } } - if (!should_destory_link) { + if (dc->config.force_enum_edp || !should_destory_link) { dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; @@ -411,6 +408,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, return false; } +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option) +{ + /* OPP FMT dyn expansion updates*/ + int i = 0; + struct pipe_ctx *pipe_ctx; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + pipe_ctx->stream_res.opp->dyn_expansion = option; + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + stream->timing.display_color_depth, + stream->signal); + } + } +} + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option) { @@ -515,7 +533,7 @@ void dc_stream_set_static_screen_events(struct dc *dc, dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events); } -static void destruct(struct dc *dc) +static void dc_destruct(struct dc *dc) { if (dc->current_state) { dc_release_state(dc->current_state); @@ -548,7 +566,7 @@ static void destruct(struct dc *dc) kfree(dc->bw_dceip); dc->bw_dceip = NULL; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN kfree(dc->dcn_soc); dc->dcn_soc = NULL; @@ -556,20 +574,18 @@ static void destruct(struct dc *dc) dc->dcn_ip = NULL; #endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 kfree(dc->vm_helper); dc->vm_helper = NULL; -#endif } -static bool construct(struct dc *dc, +static bool dc_construct(struct dc *dc, const struct dc_init_data *init_params) { struct dc_context *dc_ctx; struct bw_calcs_dceip *dc_dceip; struct bw_calcs_vbios *dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; #endif @@ -577,11 +593,13 @@ static bool construct(struct dc *dc, enum dce_version dc_version = DCE_VERSION_UNKNOWN; dc->config = init_params->flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 // Allocate memory for the vm_helper dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); + if (!dc->vm_helper) { + dm_error("%s: failed to create dc->vm_helper\n", __func__); + goto fail; + } -#endif memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); @@ -599,7 +617,7 @@ static bool construct(struct dc *dc, } dc->bw_vbios = dc_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); if (!dcn_soc) { dm_error("%s: failed to create dcn_soc\n", __func__); @@ -615,10 +633,8 @@ static bool construct(struct dc *dc, } dc->dcn_ip = dcn_ip; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 
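The dc_construct()/dc_destruct() hunks above add a previously missing allocation check that jumps to the shared fail path. As a reminder of why one destructor can serve every failure point, here is a minimal kernel-style sketch under made-up types (demo_obj and friends are illustrative, not DC code): kfree(NULL) is a no-op, so fields that were never allocated are skipped safely. The dc.c hunk resumes below.

#include <linux/slab.h>
#include <linux/types.h>

struct demo_obj {
        int *a;
        int *b;
};

/* Tolerates partially constructed objects: unset fields are still NULL. */
static void demo_destruct(struct demo_obj *obj)
{
        kfree(obj->b);          /* kfree(NULL) is a no-op */
        obj->b = NULL;
        kfree(obj->a);
        obj->a = NULL;
}

/* Caller must pass a zeroed object (e.g. one that came from kzalloc). */
static bool demo_construct(struct demo_obj *obj)
{
        obj->a = kzalloc(sizeof(*obj->a), GFP_KERNEL);
        if (!obj->a)
                goto fail;

        obj->b = kzalloc(sizeof(*obj->b), GFP_KERNEL);
        if (!obj->b)
                goto fail;

        return true;

fail:
        demo_destruct(obj);     /* one exit path cleans up whatever exists */
        return false;
}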
dc->soc_bounding_box = init_params->soc_bounding_box; #endif -#endif dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); if (!dc_ctx) { @@ -689,10 +705,8 @@ static bool construct(struct dc *dc, if (!dc->clk_mgr) goto fail; -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 if (dc->res_pool->funcs->update_bw_bounding_box) dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); -#endif /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because @@ -715,11 +729,10 @@ static bool construct(struct dc *dc, fail: - destruct(dc); + dc_destruct(dc); return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static bool disable_all_writeback_pipes_for_stream( const struct dc *dc, struct dc_stream_state *stream, @@ -732,7 +745,6 @@ static bool disable_all_writeback_pipes_for_stream( return true; } -#endif static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { @@ -758,11 +770,12 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); -#endif - dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); } + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, dangling_context); } current_ctx = dc->current_state; @@ -782,12 +795,9 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (NULL == dc) goto alloc_fail; - if (false == construct(dc, init_params)) + if (false == dc_construct(dc, init_params)) goto construct_fail; - /*TODO: separate HW and SW initialization*/ - dc->hwss.init_hw(dc); - full_pipe_count = dc->res_pool->pipe_count; if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) full_pipe_count--; @@ -799,6 +809,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->caps.max_audios = dc->res_pool->audio_count; dc->caps.linear_pitch_alignment = 64; + dc->caps.max_dp_protocol_version = DP_VERSION_1_4; + /* Populate versioning information */ dc->versions.dc_ver = DC_VER; @@ -820,14 +832,29 @@ alloc_fail: return NULL; } +void dc_hardware_init(struct dc *dc) +{ + dc->hwss.init_hw(dc); +} + void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params) { +#ifdef CONFIG_DRM_AMD_DC_HDCP + dc->ctx->cp_psp = init_params->cp_psp; +#endif +} + +void dc_deinit_callbacks(struct dc *dc) +{ +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); +#endif } void dc_destroy(struct dc **dc) { - destruct(*dc); + dc_destruct(*dc); kfree(*dc); *dc = NULL; } @@ -901,15 +928,11 @@ static void program_timing_sync( /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { - struct pipe_ctx *temp; - if (pipe_set[j]->plane_state) { if (j == 0) break; - temp = pipe_set[0]; - pipe_set[0] = pipe_set[j]; - pipe_set[j] = temp; + swap(pipe_set[0], pipe_set[j]); break; } } @@ -966,40 +989,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, struct dc_crtc_timing *crtc_timing) { struct timing_generator *tg; + struct stream_encoder *se = NULL; + + struct dc_crtc_timing hw_crtc_timing = {0}; + struct dc_link *link = sink->link; - unsigned int enc_inst, tg_inst; + unsigned int i, enc_inst, tg_inst = 0; + + // Seamless port only support single DP and EDP so far + if (sink->sink_signal != 
SIGNAL_TYPE_DISPLAY_PORT && + sink->sink_signal != SIGNAL_TYPE_EDP) + return false; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. - */ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (enc_inst >= dc->res_pool->pipe_count) + if (enc_inst == ENGINE_ID_UNKNOWN) return false; - if (enc_inst >= dc->res_pool->stream_enc_count) - return false; + for (i = 0; i < dc->res_pool->stream_enc_count; i++) { + if (dc->res_pool->stream_enc[i]->id == enc_inst) { + + se = dc->res_pool->stream_enc[i]; + + tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( + dc->res_pool->stream_enc[i]); + break; + } + } - tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg( - dc->res_pool->stream_enc[enc_inst]); + // tg_inst not found + if (i == dc->res_pool->stream_enc_count) + return false; if (tg_inst >= dc->res_pool->timing_generator_count) return false; tg = dc->res_pool->timing_generators[tg_inst]; - if (!tg->funcs->is_matching_timing) + if (!tg->funcs->get_hw_timing) + return false; + + if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) + return false; + + if (crtc_timing->h_total != hw_crtc_timing.h_total) + return false; + + if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) + return false; + + if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) + return false; + + if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) + return false; + + if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) return false; - if (!tg->funcs->is_matching_timing(tg, crtc_timing)) + if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) + return false; + + if (crtc_timing->v_total != hw_crtc_timing.v_total) + return false; + + if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) + return false; + + if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) + return false; + + if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) + return false; + + if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) + return false; + + if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) return false; if (dc_is_dp_signal(link->connector_signal)) { @@ -1012,6 +1082,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; + if (!se->funcs->dp_get_pixel_format) + return false; + + if (!se->funcs->dp_get_pixel_format( + se, + &hw_crtc_timing.pixel_encoding, + &hw_crtc_timing.display_color_depth)) + return false; + + if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) + return false; + + if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) + return false; } return true; @@ -1073,15 +1157,16 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* re-program planes for existing stream, in case we need to * free up plane resource for later use */ - for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mode_changed) - continue; + if (dc->hwss.apply_ctx_for_surface) + for (i = 0; i < context->stream_count; i++) { + if 
(context->streams[i]->mode_changed) + continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); /* use new pipe config in new context */ - } + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* use new pipe config in new context */ + } /* Program hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1100,16 +1185,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; if (!context->streams[i]->mode_changed) continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* * enable stereo @@ -1137,14 +1225,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); if (!dc->optimize_seamless_boot) - /* pplib is notified if disp_num changed */ - dc->hwss.optimize_bandwidth(dc, context); + /* pplib is notified if disp_num changed */ + dc->hwss.optimize_bandwidth(dc, context); for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; - memset(&context->commit_hints, 0, sizeof(context->commit_hints)); - dc_release_state(dc->current_state); dc->current_state = context; @@ -1210,7 +1296,7 @@ struct dc_state *dc_create_state(struct dc *dc) * initialize and obtain IP and SOC the base DML instance from DC is * initially copied into every context */ -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); #endif @@ -1383,11 +1469,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa elevate_update_type(&update_type, UPDATE_TYPE_MED); } - if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) { - update_flags->bits.sdr_white_level = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); - } - if (u->plane_info->dcc.enable != u->surface->dcc.enable || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { @@ -1405,7 +1486,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch - || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); @@ -1444,7 +1524,10 @@ static enum surface_update_type get_scaling_info_update_type( if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width || u->scaling_info->clip_rect.height != u->surface->clip_rect.height || u->scaling_info->dst_rect.width != u->surface->dst_rect.width - || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) { + || u->scaling_info->dst_rect.height != u->surface->dst_rect.height + || u->scaling_info->scaling_quality.integer_scaling != + 
u->surface->scaling_quality.integer_scaling + ) { update_flags->bits.scaling_change = 1; if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width @@ -1492,20 +1575,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc, enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; - update_flags->raw = 0; // Reset all flags - if (u->flip_addr) update_flags->bits.addr_update = 1; - if (!is_surface_in_context(context, u->surface)) { - update_flags->bits.new_plane = 1; + if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; } - if (u->surface->force_full_update) { - update_flags->bits.full_update = 1; - return UPDATE_TYPE_FULL; - } + update_flags->raw = 0; // Reset all flags type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1537,6 +1615,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->bits.gamma_change = 1; } + if (u->hdr_mult.value) + if (u->hdr_mult.value != u->surface->hdr_mult.value) { + update_flags->bits.hdr_mult = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_MED); + } + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); @@ -1563,40 +1647,45 @@ static enum surface_update_type check_update_surfaces_for_stream( enum surface_update_type overall_type = UPDATE_TYPE_FAST; if (stream_status == NULL || stream_status->plane_count != surface_count) - return UPDATE_TYPE_FULL; + overall_type = UPDATE_TYPE_FULL; /* some stream updates require passive update */ if (stream_update) { - if ((stream_update->src.height != 0) && - (stream_update->src.width != 0)) - return UPDATE_TYPE_FULL; + union stream_update_flags *su_flags = &stream_update->stream->update_flags; - if ((stream_update->dst.height != 0) && - (stream_update->dst.width != 0)) - return UPDATE_TYPE_FULL; + if ((stream_update->src.height != 0 && stream_update->src.width != 0) || + (stream_update->dst.height != 0 && stream_update->dst.width != 0) || + stream_update->integer_scaling_update) + su_flags->bits.scaling = 1; if (stream_update->out_transfer_func) - return UPDATE_TYPE_FULL; + su_flags->bits.out_tf = 1; if (stream_update->abm_level) - return UPDATE_TYPE_FULL; + su_flags->bits.abm_level = 1; if (stream_update->dpms_off) - return UPDATE_TYPE_FULL; + su_flags->bits.dpms_off = 1; + + if (stream_update->gamut_remap) + su_flags->bits.gamut_remap = 1; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (stream_update->wb_update) - return UPDATE_TYPE_FULL; -#endif + su_flags->bits.wb_update = 1; + if (su_flags->raw != 0) + overall_type = UPDATE_TYPE_FULL; + + if (stream_update->output_csc_transform || stream_update->output_color_space) + su_flags->bits.out_csc = 1; + + if (stream_update->dsc_config) + overall_type = UPDATE_TYPE_FULL; } for (i = 0 ; i < surface_count; i++) { enum surface_update_type type = det_surface_update(dc, &updates[i]); - if (type == UPDATE_TYPE_FULL) - return type; - elevate_update_type(&overall_type, type); } @@ -1618,16 +1707,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream( int i; enum surface_update_type type; + if (stream_update) + stream_update->stream->update_flags.raw = 0; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) 
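The surface-update hunks above lean on two small idioms: an update type whose severity only ratchets upward (elevate_update_type()), and a flags union whose raw view lets a full update set every bit in one store. A compact sketch with illustrative enum and bitfield names (the real DC types differ) before the hunk resumes:

#include <stdint.h>

enum update_type { TYPE_FAST, TYPE_MED, TYPE_FULL }; /* ordered by severity */

union update_flags {
        struct {
                uint32_t addr_update : 1;
                uint32_t scaling_change : 1;
        } bits;
        uint32_t raw;
};

/* Severity only moves up, mirroring elevate_update_type(). */
static void elevate(enum update_type *overall, enum update_type candidate)
{
        if (candidate > *overall)
                *overall = candidate;
}

static enum update_type classify(union update_flags *f,
                                 int new_plane, int addr_changed, int scaling_changed)
{
        enum update_type overall = TYPE_FAST;

        if (new_plane) {
                f->raw = 0xFFFFFFFF;    /* blanket-set every flag via the raw view */
                return TYPE_FULL;
        }

        f->raw = 0;                     /* reset, then set bits per detected change */
        if (addr_changed)
                f->bits.addr_update = 1;        /* a flip alone stays FAST */
        if (scaling_changed) {
                f->bits.scaling_change = 1;
                elevate(&overall, TYPE_MED);
        }
        return overall;
}

int main(void)
{
        union update_flags f;
        return classify(&f, 0, 1, 1) == TYPE_MED ? 0 : 1;
}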
+ if (type == UPDATE_TYPE_FULL) { + if (stream_update) + stream_update->stream->update_flags.raw = 0xFFFFFFFF; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } - if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) - dc->optimized_required = true; + if (type == UPDATE_TYPE_FAST) { + // If there's an available clock comparator, we use that. + if (dc->clk_mgr->funcs->are_clock_states_equal) { + if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) + dc->optimized_required = true; + // Else we fall back to mem compare. + } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { + dc->optimized_required = true; + } + } return type; } @@ -1703,8 +1805,6 @@ static void copy_surface_update_to_plane( srf_update->plane_info->global_alpha_value; surface->dcc = srf_update->plane_info->dcc; - surface->sdr_white_level = - srf_update->plane_info->sdr_white_level; surface->layer_index = srf_update->plane_info->layer_index; } @@ -1737,7 +1837,6 @@ static void copy_surface_update_to_plane( sizeof(struct dc_transfer_func_distributed_points)); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (srf_update->func_shaper && (surface->in_shaper_func != srf_update->func_shaper) @@ -1750,13 +1849,16 @@ static void copy_surface_update_to_plane( memcpy(surface->lut3d_func, srf_update->lut3d_func, sizeof(*surface->lut3d_func)); + if (srf_update->hdr_mult.value) + surface->hdr_mult = + srf_update->hdr_mult; + if (srf_update->blend_tf && (surface->blend_tf != srf_update->blend_tf)) memcpy(surface->blend_tf, srf_update->blend_tf, sizeof(*surface->blend_tf)); -#endif if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = *srf_update->input_csc_color_matrix; @@ -1769,8 +1871,10 @@ static void copy_surface_update_to_plane( static void copy_stream_update_to_stream(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, - const struct dc_stream_update *update) + struct dc_stream_update *update) { + struct dc_context *dc_ctx = dc->ctx; + if (update == NULL || stream == NULL) return; @@ -1831,7 +1935,6 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dither_option) stream->dither_option = *update->dither_option; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* update current stream with writeback info */ if (update->wb_update) { int i; @@ -1842,23 +1945,32 @@ static void copy_stream_update_to_stream(struct dc *dc, stream->writeback_info[i] = update->wb_update->writeback_info[i]; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) if (update->dsc_config) { struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; uint32_t old_dsc_enabled = stream->timing.flags.DSC; uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && update->dsc_config->num_slices_v != 0); - stream->timing.dsc_cfg = *update->dsc_config; - stream->timing.flags.DSC = enable_dsc; - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, - true)) { - stream->timing.dsc_cfg = old_dsc_cfg; - stream->timing.flags.DSC = old_dsc_enabled; + /* Use temporary context for validating new DSC config */ + struct dc_state *dsc_validate_context = dc_create_state(dc); + + if (dsc_validate_context) { + dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); + + stream->timing.dsc_cfg = *update->dsc_config; +
stream->timing.flags.DSC = enable_dsc; + if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { + stream->timing.dsc_cfg = old_dsc_cfg; + stream->timing.flags.DSC = old_dsc_enabled; + update->dsc_config = NULL; + } + + dc_release_state(dsc_validate_context); + } else { + DC_ERROR("Failed to allocate new validate context for DSC change\n"); + update->dsc_config = NULL; } } -#endif } static void commit_planes_do_stream_update(struct dc *dc, @@ -1868,6 +1980,7 @@ static void commit_planes_do_stream_update(struct dc *dc, struct dc_state *context) { int j; + bool should_program_abm; // Stream updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -1877,11 +1990,11 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->periodic_interrupt0 && dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0); + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); if (stream_update->periodic_interrupt1 && dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1); + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || @@ -1891,6 +2004,12 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.update_info_frame(pipe_ctx); } + if (stream_update->hdr_static_metadata && + stream->use_dynamic_meta && + dc->hwss.set_dmdata_attributes && + pipe_ctx->stream->dmdata_address.quad_part != 0) + dc->hwss.set_dmdata_attributes(pipe_ctx); + if (stream_update->gamut_remap) dc_stream_set_gamut_remap(dc, stream); @@ -1898,31 +2017,25 @@ static void commit_planes_do_stream_update(struct dc *dc, dc_stream_program_csc_matrix(dc, stream); if (stream_update->dither_option) { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; -#endif resource_build_bit_depth_reduction_params(pipe_ctx->stream, &pipe_ctx->stream->bit_depth_params); pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, &stream->bit_depth_params, &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } -#endif } -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) { dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true); dp_update_dsc_config(pipe_ctx); dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false); } -#endif /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -1948,14 +2061,21 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { - if (pipe_ctx->stream_res.tg->funcs->is_blanked) { - // if otg funcs defined check if blanked before programming - if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = true; + + // if otg funcs defined check if blanked before programming + if (pipe_ctx->stream_res.tg->funcs->is_blanked) + if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = false; + + if (should_program_abm) { + if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { + pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm); + } else { pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, 
stream->abm_level); - } else - pipe_ctx->stream_res.abm->funcs->set_abm_level( - pipe_ctx->stream_res.abm, stream->abm_level); + } + } } } } @@ -2000,11 +2120,14 @@ static void commit_planes_for_stream(struct dc *dc, * In case of turning off screen, no need to program front end a second time. * just return after program blank. */ - dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); + return; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (!IS_DIAG_DC(dc->ctx->dce_environment)) { for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2026,7 +2149,6 @@ static void commit_planes_for_stream(struct dc *dc, } } } -#endif // Update Type FULL, Surface updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -2047,7 +2169,6 @@ static void commit_planes_for_stream(struct dc *dc, if (update_type == UPDATE_TYPE_FAST) continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); if (dc->hwss.program_triplebuffer != NULL && @@ -2056,14 +2177,32 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.program_triplebuffer( dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); } -#endif stream_status = stream_get_status(context, pipe_ctx->stream); - dc->hwss.apply_ctx_for_surface( + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); } } + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { + dc->hwss.program_front_end_for_ctx(dc, context); +#ifdef CONFIG_DRM_AMD_DC_DCN + if (dc->debug.validate_dml_output) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; + if (cur_pipe.stream == NULL) + continue; + + cur_pipe.plane_res.hubp->funcs->validate_dml_output( + cur_pipe.plane_res.hubp, dc->ctx, + &context->res_ctx.pipe_ctx[i].rq_regs, + &context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs); + } + } +#endif + } // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { @@ -2073,7 +2212,6 @@ static void commit_planes_for_stream(struct dc *dc, */ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (dc->hwss.set_flip_control_gsl) for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2092,7 +2230,6 @@ static void commit_planes_for_stream(struct dc *dc, plane_state->flip_immediate); } } -#endif /* Perform requested Updates */ for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2105,7 +2242,6 @@ static void commit_planes_for_stream(struct dc *dc, if (pipe_ctx->plane_state != plane_state) continue; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /*program triple buffer after lock based on flip type*/ if (dc->hwss.program_triplebuffer != NULL && !dc->debug.disable_tri_buf) { @@ -2113,7 +2249,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.program_triplebuffer( dc, pipe_ctx, plane_state->triplebuffer_flips); } -#endif if (srf_updates[i].flip_addr) dc->hwss.update_plane_addr(dc, pipe_ctx); } @@ -2274,14 +2409,15 @@ void dc_set_power_state( case DC_ACPI_CM_POWER_STATE_D0: dc_resource_state_construct(dc, dc->current_state); + if (dc->ctx->dmub_srv) + 
dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); + dc->hwss.init_hw(dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 if (dc->hwss.init_sys_ctx != NULL && dc->vm_pa_config.valid) { dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } -#endif break; default: @@ -2361,6 +2497,17 @@ bool dc_submit_i2c( cmd); } +bool dc_submit_i2c_oem( + struct dc *dc, + struct i2c_command *cmd) +{ + struct ddc_service *ddc = dc->res_pool->oem_device; + return dce_i2c_submit_command( + dc->res_pool, + ddc->ddc_pin, + cmd); +} + static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) { if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index b9227d5de3a3..502ed3c7959d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -33,7 +33,6 @@ #include "core_status.h" #include "core_types.h" -#include "hw_sequencer.h" #include "resource.h" @@ -310,14 +309,13 @@ void context_timing_trace( struct resource_context *res_ctx) { int i; - struct dc *core_dc = dc; int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; struct crtc_position position; - unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; + unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; DC_LOGGER_INIT(dc->ctx->logger); - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; /* get_position() returns CRTC vertical/horizontal counter * hence not applicable for underlay pipe @@ -329,7 +327,7 @@ void context_timing_trace( h_pos[i] = position.horizontal_count; v_pos[i] = position.vertical_count; } - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) @@ -347,7 +345,7 @@ void context_clock_trace( struct dc *dc, struct dc_state *context) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOGGER_INIT(dc->ctx->logger); CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ca20b150afcc..cef8c1ba9797 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -74,12 +74,11 @@ enum { /******************************************************************************* * Private functions ******************************************************************************/ -static void destruct(struct dc_link *link) +static void dc_link_destruct(struct dc_link *link) { int i; if (link->hpd_gpio != NULL) { - dal_gpio_close(link->hpd_gpio); dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } @@ -373,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) if (GPIO_RESULT_OK != dal_ddc_open( ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) { - dal_gpio_destroy_ddc(&ddc); + dal_ddc_close(ddc); return present; } @@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin } -static void read_edp_current_link_settings_on_detect(struct dc_link *link) +static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = { {0} }; uint8_t 
link_bw_set; @@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) &link_bw_set, sizeof(link_bw_set)); if (link_bw_set == 0) { - /* If standard link rates are not being used, - * Read DPCD 00115h to find the link rate set used - */ - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; - link->cur_link_settings.link_rate_set = link_rate_set; - link->cur_link_settings.use_link_rate_set = true; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the edp link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // edp_supported_link_rates_count = 0 for DP + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + // Link Rate not found. Seamless boot may not work. + ASSERT(false); } } else { link->cur_link_settings.link_rate = link_bw_set; @@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); } -bool wait_for_alt_mode(struct dc_link *link) +static bool wait_for_alt_mode(struct dc_link *link) { /** @@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link) * This does not create remote sinks but will trigger DM * to start MST detection if a branch is detected. */ -bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +static bool dc_link_detect_helper(struct dc_link *link, + enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; struct display_sink_capability sink_caps = { 0 }; @@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) struct dpcd_caps prev_dpcd_caps; bool same_dpcd = true; enum dc_connection_type new_connection_type = dc_connection_none; + bool perform_dp_seamless_boot = false; + DC_LOGGER_INIT(link->ctx->logger); if (dc_is_virtual_signal(link->connector_signal)) @@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { - read_edp_current_link_settings_on_detect(link); + read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); - sink_caps.transaction_type = - DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; } case SIGNAL_TYPE_DISPLAY_PORT: { + /* wa HPD high coming too early*/ if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { @@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * empty which leads to allocate_mst_payload() has "0" * pbn_per_slot value leading to exception on dc_fixpt_div() */ - link->verified_link_cap = link->reported_link_cap; + dp_verify_mst_link_cap(link); + if (prev_sink != NULL) dc_sink_release(prev_sink); return false; } + // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. 
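The read_current_link_settings_on_detect() hunk above decodes two DPCD registers: if the link-bandwidth register (LINK_BW_SET, DPCD 00100h) reads zero, an eDP panel may instead be running from a table-based rate selected by LINK_RATE_SET (DPCD 00115h, as the new comment notes), which indexes the panel's supported-link-rate table; for non-eDP sinks that table is empty, so the bounds check fails. A standalone sketch of the decode follows, with invented register values and rates; the detection hunk continues below.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Pretend these came back from DPCD reads. */
        uint8_t link_bw_set = 0;        /* 0: standard BW codes not in use */
        uint8_t link_rate_set = 1;      /* index into the panel's rate table */

        /* Panel-reported link rates in kHz (illustrative); the count would
         * be zero for a non-eDP sink, making the bounds check fail. */
        const uint32_t edp_rates_khz[] = { 162000, 216000, 270000 };
        const uint8_t edp_rate_count = 3;

        if (link_bw_set == 0) {
                if (link_rate_set < edp_rate_count)
                        printf("eDP rate: %u kHz (rate set %u)\n",
                               edp_rates_khz[link_rate_set], link_rate_set);
                else
                        printf("no rate found; seamless boot may not work\n");
        } else {
                printf("standard link rate code 0x%02x\n", link_bw_set);
        }
        return 0;
}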
+ if (reason == DETECT_REASON_BOOT && + dc_ctx->dc->config.power_down_display_on_boot == false && + link->link_status.link_active == true) + perform_dp_seamless_boot = true; + + if (perform_dp_seamless_boot) { + read_current_link_settings_on_detect(link); + link->verified_link_cap = link->reported_link_cap; + } + break; } @@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * two link trainings */ - /* deal with non-mst cases */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); + // verify link cap for SST non-seamless boot + if (!perform_dp_seamless_boot) + dp_verify_link_cap_with_retries(link, + &link->reported_link_cap, + LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { @@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) dc_sink_release(prev_sink); return true; + +} + +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + const struct dc *dc = link->dc; + bool ret; + + /* get out of low power state */ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + + ret = dc_link_detect_helper(link, reason); + + /* Go back to power optimized state */ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + + return ret; } bool dc_link_get_hpd_state(struct dc_link *dc_link) @@ -1206,7 +1244,7 @@ static enum transmitter translate_encoder_to_transmitter( } } -static bool construct( +static bool dc_link_construct( struct dc_link *link, const struct link_init_data *init_params) { @@ -1408,7 +1446,7 @@ struct dc_link *link_create(const struct link_init_data *init_params) if (NULL == link) goto alloc_fail; - if (false == construct(link, init_params)) + if (false == dc_link_construct(link, init_params)) goto construct_fail; return link; @@ -1422,7 +1460,7 @@ alloc_fail: void link_destroy(struct dc_link **link) { - destruct(*link); + dc_link_destruct(*link); kfree(*link); *link = NULL; } @@ -1457,10 +1495,7 @@ static enum dc_status enable_link_dp( bool skip_video_pattern; struct dc_link *link = stream->link; struct dc_link_settings link_settings = {0}; - enum dp_panel_mode panel_mode; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool fec_enable; -#endif int i; bool apply_seamless_boot_optimization = false; @@ -1476,15 +1511,6 @@ static enum dc_status enable_link_dp( decide_link_settings(stream, &link_settings); if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { - /* If link settings are different than current and link already enabled - * then need to disable before programming to new rate. 
- */ - if (link->link_status.link_active && - (link->cur_link_settings.lane_count != link_settings.lane_count || - link->cur_link_settings.link_rate != link_settings.link_rate)) { - dp_disable_link_phy(link, pipe_ctx->stream->signal); - } - /*in case it is not on*/ link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); @@ -1492,53 +1518,32 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (!apply_seamless_boot_optimization) + if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); - dp_enable_link_phy( - link, - pipe_ctx->stream->signal, - pipe_ctx->clock_source->id, - &link_settings); - - if (stream->sink_patches.dppowerup_delay > 0) { - int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; - - msleep(delay_dp_power_up_in_ms); - } - - panel_mode = dp_get_panel_mode(link); - dp_set_panel_mode(link, panel_mode); - skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) skip_video_pattern = false; - if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, &link_settings); - - link->cur_link_settings = link_settings; - status = DC_OK; - } else if (perform_link_training_with_retries( - link, + if (perform_link_training_with_retries( &link_settings, skip_video_pattern, - LINK_TRAINING_ATTEMPTS)) { + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal)) { link->cur_link_settings = link_settings; status = DC_OK; } else status = DC_FAIL_DP_LINK_TRAINING; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; dp_set_fec_enable(link, fec_enable); -#endif return status; } @@ -2025,6 +2030,45 @@ static void write_i2c_redriver_setting( ASSERT(i2c_success); } +static void disable_link(struct dc_link *link, enum signal_type signal) +{ + /* + * TODO: implement call for dp_set_hw_test_pattern + * it is needed for compliance testing + */ + + /* Here we need to specify that encoder output settings + * need to be calculated as for the set mode, + * it will lead to querying dynamic link capabilities + * which should be done before enable output + */ + + if (dc_is_dp_signal(signal)) { + /* SST DP, eDP */ + if (dc_is_dp_sst_signal(signal)) + dp_disable_link_phy(link, signal); + else + dp_disable_link_phy_mst(link, signal); + + if (dc_is_dp_sst_signal(signal) || + link->mst_stream_alloc_table.stream_count == 0) { + dp_set_fec_enable(link, false); + dp_set_fec_ready(link, false); + } + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } +} + static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -2109,6 +2153,19 @@ static enum dc_status enable_link( struct pipe_ctx *pipe_ctx) { enum dc_status status = DC_ERROR_UNEXPECTED; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + + /* There's some scenarios where driver is unloaded with display + * still enabled. 
When driver is reloaded, it may cause a display + * to not light up if there is a mismatch between old and new + * link settings. Need to call disable first before enabling at + * new link settings. + */ + if (link->link_status.link_active) { + disable_link(link, pipe_ctx->stream->signal); + } + switch (pipe_ctx->stream->signal) { case SIGNAL_TYPE_DISPLAY_PORT: status = enable_link_dp(state, pipe_ctx); @@ -2143,44 +2200,6 @@ static enum dc_status enable_link( return status; } -static void disable_link(struct dc_link *link, enum signal_type signal) -{ - /* - * TODO: implement call for dp_set_hw_test_pattern - * it is needed for compliance testing - */ - - /* here we need to specify that encoder output settings - * need to be calculated as for the set mode, - * it will lead to querying dynamic link capabilities - * which should be done before enable output */ - - if (dc_is_dp_signal(signal)) { - /* SST DP, eDP */ - if (dc_is_dp_sst_signal(signal)) - dp_disable_link_phy(link, signal); - else - dp_disable_link_phy_mst(link, signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - - if (dc_is_dp_sst_signal(signal) || - link->mst_stream_alloc_table.stream_count == 0) { - dp_set_fec_enable(link, false); - dp_set_fec_ready(link, false); - } -#endif - } else - link->link_enc->funcs->disable_output(link->link_enc, signal); - - if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - /* MST disable link only when no stream use the link */ - if (link->mst_stream_alloc_table.stream_count <= 0) - link->link_status.link_active = false; - } else { - link->link_status.link_active = false; - } -} - static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing) { @@ -2217,7 +2236,7 @@ static bool dp_active_dongle_validate_timing( break; } - if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || + if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; @@ -2317,9 +2336,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t backlight_pwm_u16_16, uint32_t frame_ramp) { - struct dc *core_dc = link->ctx->dc; - struct abm *abm = core_dc->res_pool->abm; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct abm *abm = dc->res_pool->abm; + struct dmcu *dmcu = dc->res_pool->dmcu; unsigned int controller_id = 0; bool use_smooth_brightness = true; int i; @@ -2337,22 +2356,22 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (dc_is_embedded_signal(link->connector_signal)) { for (i = 0; i < MAX_PIPES; i++) { - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { - if (core_dc->current_state->res_ctx. + if (dc->current_state->res_ctx.pipe_ctx[i].stream) { + if (dc->current_state->res_ctx. 
pipe_ctx[i].stream->link == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ controller_id = - core_dc->current_state-> + dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; /* Disable brightness ramping when the display is blanked * as it can hang the DMCU */ - if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) frame_ramp = 0; } } @@ -2370,8 +2389,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, bool dc_link_set_abm_disable(const struct dc_link *link) { - struct dc *core_dc = link->ctx->dc; - struct abm *abm = core_dc->res_pool->abm; + struct dc *dc = link->ctx->dc; + struct abm *abm = dc->res_pool->abm; if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) return false; @@ -2381,15 +2400,204 @@ bool dc_link_set_abm_disable(const struct dc_link *link) return true; } -bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) +bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + + + + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); + + link->psr_allow_active = allow_active; + + return true; +} + +bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (dmcu != NULL && link->psr_feature_enabled) + dmcu->funcs->get_psr_state(dmcu, psr_state); + + return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(enum transmitter transmitter_value) +{ + switch (transmitter_value) { + case TRANSMITTER_UNIPHY_A: + return PHYLD_0; + case TRANSMITTER_UNIPHY_B: + return PHYLD_1; + case TRANSMITTER_UNIPHY_C: + return PHYLD_2; + case TRANSMITTER_UNIPHY_D: + return PHYLD_3; + case TRANSMITTER_UNIPHY_E: + return PHYLD_4; + case TRANSMITTER_UNIPHY_F: + return PHYLD_5; + case TRANSMITTER_NUTMEG_CRT: + return PHYLD_6; + case TRANSMITTER_TRAVIS_CRT: + return PHYLD_7; + case TRANSMITTER_TRAVIS_LCD: + return PHYLD_8; + case TRANSMITTER_UNIPHY_G: + return PHYLD_9; + case TRANSMITTER_COUNT: + return PHYLD_COUNT; + case TRANSMITTER_UNKNOWN: + return PHYLD_UNKNOWN; + default: + WARN_ONCE(1, "Unknown transmitter value %d\n", + transmitter_value); + return PHYLD_UNKNOWN; + } +} + +bool dc_link_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context) +{ + struct dc *dc; + struct dmcu *dmcu; + int i; + /* updateSinkPsrDpcdConfig*/ + union dpcd_psr_configuration psr_configuration; + + psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + dc = link->ctx->dc; + dmcu = dc->res_pool->dmcu; + + if (!dmcu) + return false; + + + memset(&psr_configuration, 0, sizeof(psr_configuration)); + + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = + psr_config->psr_frame_capture_indication_req; + + /* Check for PSR v2*/ + if (psr_config->psr_version == 0x2) { + /* For PSR v2 selective update. + * Indicates whether sink should start capturing + * immediately following active scan line, + * or starting with the 2nd active scan line. 
+ */ + psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; + /*For PSR v2, determines whether Sink should generate + * IRQ_HPD when CRC mismatch is detected. + */ + psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 368, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; + psr_context->transmitterId = link->link_enc->transmitter; + psr_context->engineId = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + psr_context->controllerId = + dc->current_state->res_ctx. + pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/ + psr_context->phyType = PHY_TYPE_UNIPHY; + /*PhyId is associated with the transmitter id*/ + psr_context->smuPhyId = + transmitter_to_phy_id(link->link_enc->transmitter); - if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled) - dmcu->funcs->set_psr_enable(dmcu, enable, wait); + psr_context->crtcTimingVerticalTotal = stream->timing.v_total; + psr_context->vsyncRateHz = div64_u64(div64_u64((stream-> + timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + psr_context->psrSupportedDisplayConfig = true; + psr_context->psrExitLinkTrainingRequired = + psr_config->psr_exit_link_training_required; + psr_context->sdpTransmitLineNumDeadline = + psr_config->psr_sdp_transmit_line_num_deadline; + psr_context->psrFrameCaptureIndicationReq = + psr_config->psr_frame_capture_indication_req; + + psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + + psr_context->numberOfControllers = + link->dc->res_pool->timing_generator_count; + + psr_context->rfb_update_auto_en = true; + + /* 2 frames before enter PSR. */ + psr_context->timehyst_frames = 2; + /* half a frame + * (units in 100 lines, i.e. a value of 1 represents 100 lines) + */ + psr_context->hyst_lines = stream->timing.v_total / 2 / 100; + psr_context->aux_repeats = 10; + + psr_context->psr_level.u32all = 0; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + /*skip power down the single pipe since it blocks the cstate*/ + if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +#endif + + /* SMU will perform additional powerdown sequence. + * For unsupported ASICs, set psr_level flag to skip PSR + * static screen notification to SMU. 
+ * (Always set for DAL2, did not check ASIC) + */ + psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; + + /* Complete PSR entry before aborting to prevent intermittent + * freezes on certain eDPs + */ + psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + + /* Controls additional delay after remote frame capture before + * continuing power down, default = 0 + */ + psr_context->frame_delay = 0; + + link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + + /* psr_enabled == 0 indicates setup_psr did not succeed, but this + * should not happen since firmware should be running at this point + */ + if (link->psr_feature_enabled == 0) + ASSERT(0); return true; + } const struct dc_link_status *dc_link_get_status(const struct dc_link *link) @@ -2415,28 +2623,13 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) return dc_fixpt_div_int(mbytes_per_sec, 54); } -static int get_color_depth(enum dc_color_depth color_depth) -{ - switch (color_depth) { - case COLOR_DEPTH_666: return 6; - case COLOR_DEPTH_888: return 8; - case COLOR_DEPTH_101010: return 10; - case COLOR_DEPTH_121212: return 12; - case COLOR_DEPTH_141414: return 14; - case COLOR_DEPTH_161616: return 16; - default: return 0; - } -} - static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) { - uint32_t bpc; uint64_t kbps; struct fixed31_32 peak_kbps; uint32_t numerator; uint32_t denominator; - bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth); kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); /* @@ -2510,7 +2703,7 @@ static void update_mst_stream_alloc_table( /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm */ -static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; @@ -2521,6 +2714,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; uint8_t i; + enum act_return_status ret; DC_LOGGER_INIT(link->ctx->logger); /* enable_link_dp_mst already check link->enabled_stream_count @@ -2568,14 +2762,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) &link->mst_stream_alloc_table); /* send down message */ - dm_helpers_dp_mst_poll_for_allocation_change_trigger( + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); + if (ret != ACT_LINK_LOST) { + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -2667,16 +2863,38 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) return DC_OK; } +#if defined(CONFIG_DRM_AMD_DC_HDCP) +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + if (cp_psp && cp_psp->funcs.update_stream_config) { + struct cp_psp_stream_config config; + + memset(&config, 0, sizeof(config)); + + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id; + config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; + config.dpms_off = dpms_off; + config.dm_stream_ctx = 
pipe_ctx->stream->dm_stream_context; + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); + } +} +#endif void core_link_enable_stream( struct dc_state *state, struct pipe_ctx *pipe_ctx) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { stream->link->link_enc->funcs->setup( stream->link->link_enc, @@ -2692,6 +2910,7 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing, stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) @@ -2715,18 +2934,21 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing); - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { bool apply_edp_fast_boot_optimization = pipe_ctx->stream->apply_edp_fast_boot_optimization; pipe_ctx->stream->apply_edp_fast_boot_optimization = false; resource_build_info_frame(pipe_ctx); - core_dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); /* Do not touch link on seamless boot optimization. */ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2734,6 +2956,9 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2759,7 +2984,7 @@ void core_link_enable_stream( } } - core_dc->hwss.enable_audio_stream(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); /* turn off otg test pattern if enable */ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) @@ -2767,50 +2992,54 @@ void core_link_enable_stream( CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, COLOR_DEPTH_UNDEFINED); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } -#endif - core_dc->hwss.enable_stream(pipe_ctx); + dc->hwss.enable_stream(pipe_ctx); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Set DPS PPS SDP (AKA "info frames") */ if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_pps_sdp(pipe_ctx, true); } -#endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); + dc_link_allocate_mst_payload(pipe_ctx); - core_dc->hwss.unblank_stream(pipe_ctx, + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); - } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif + } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) if (dc_is_dp_signal(pipe_ctx->stream->signal) || 
dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); } -#endif } void core_link_disable_stream(struct pipe_ctx *pipe_ctx) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; - core_dc->hwss.blank_stream(pipe_ctx); + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, true); +#endif + + dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); @@ -2839,25 +3068,23 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) write_i2c_redriver_setting(pipe_ctx, false); } } - core_dc->hwss.disable_stream(pipe_ctx); + dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } -#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) return; - core_dc->hwss.set_avmute(pipe_ctx, enable); + dc->hwss.set_avmute(pipe_ctx, enable); } /** @@ -2915,13 +3142,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( uint32_t bits_per_channel = 0; uint32_t kbps; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (timing->flags.DSC) { kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); return kbps; } -#endif switch (timing->display_color_depth) { case COLOR_DEPTH_666: @@ -3074,6 +3299,7 @@ void dc_link_disable_hpd(const struct dc_link *link) void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) @@ -3082,6 +3308,7 @@ void dc_link_set_test_pattern(struct dc_link *link, dc_link_dp_set_test_pattern( link, test_pattern, + test_pattern_color_space, p_link_settings, p_custom_pattern, cust_pattern_size); @@ -3097,7 +3324,6 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps *= 8; /* 8 bits per byte*/ link_bw_kbps *= link_setting->lane_count; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { /* Account for FEC overhead. 
* We have to do it based on caps, @@ -3122,7 +3348,6 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps = mul_u64_u32_shr(BIT_ULL(32) * 970LL / 1000, link_bw_kbps, 32); } -#endif return link_bw_kbps; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 505967b48e14..c2c136b12184 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -187,7 +187,7 @@ void dal_ddc_i2c_payloads_add( } -static void construct( +static void ddc_service_construct( struct ddc_service *ddc_service, struct ddc_service_init_data *init_data) { @@ -206,7 +206,10 @@ static void construct( ddc_service->ddc_pin = NULL; } else { hw_info.ddc_channel = i2c_info.i2c_line; - hw_info.hw_supported = i2c_info.i2c_hw_assist; + if (ddc_service->link != NULL) + hw_info.hw_supported = i2c_info.i2c_hw_assist; + else + hw_info.hw_supported = false; ddc_service->ddc_pin = dal_gpio_create_ddc( gpio_service, @@ -236,11 +239,11 @@ struct ddc_service *dal_ddc_service_create( if (!ddc_service) return NULL; - construct(ddc_service, init_data); + ddc_service_construct(ddc_service, init_data); return ddc_service; } -static void destruct(struct ddc_service *ddc) +static void ddc_service_destruct(struct ddc_service *ddc) { if (ddc->ddc_pin) dal_gpio_destroy_ddc(&ddc->ddc_pin); @@ -252,7 +255,7 @@ void dal_ddc_service_destroy(struct ddc_service **ddc) BREAK_TO_DEBUGGER(); return; } - destruct(*ddc); + ddc_service_destruct(*ddc); kfree(*ddc); *ddc = NULL; } @@ -374,6 +377,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( enum display_dongle_type *dongle = &sink_cap->dongle_type; uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; bool is_type2_dongle = false; + int retry_count = 2; struct dp_hdmi_dongle_signature_data *dongle_signature; /* Assume we have no valid DP passive dongle connected */ @@ -386,13 +390,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( DP_HDMI_DONGLE_ADDRESS, type2_dongle_buf, sizeof(type2_dongle_buf))) { - *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + /* Passive HDMI dongles can sometimes fail here without retrying*/ + while (retry_count > 0) { + if (i2c_read(ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) + break; + retry_count--; + } + if (retry_count == 0) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - return; + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + return; + } } /* Check if Type 2 dongle.*/ @@ -496,7 +511,7 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size) { - bool ret; + bool ret = false; uint32_t payload_size = dal_ddc_service_is_in_aux_transaction_mode(ddc) ? 
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -515,34 +530,32 @@ bool dal_ddc_service_query_ddc_data(
 	/*TODO: len of payload data for i2c and aux is uint8!!!!,
 	 * but we want to read 256 over i2c!!!!*/
 	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
-		struct aux_payload write_payload = {
-			.i2c_over_aux = true,
-			.write = true,
-			.mot = true,
-			.address = address,
-			.length = write_size,
-			.data = write_buf,
-			.reply = NULL,
-			.defer_delay = get_defer_delay(ddc),
-		};
-
-		struct aux_payload read_payload = {
-			.i2c_over_aux = true,
-			.write = false,
-			.mot = false,
-			.address = address,
-			.length = read_size,
-			.data = read_buf,
-			.reply = NULL,
-			.defer_delay = get_defer_delay(ddc),
-		};
-
-		ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+		struct aux_payload payload;
+		bool read_available = true;
+
+		payload.i2c_over_aux = true;
+		payload.address = address;
+		payload.reply = NULL;
+		payload.defer_delay = get_defer_delay(ddc);
+
+		if (write_size != 0) {
+			payload.write = true;
+			payload.mot = false;
+			payload.length = write_size;
+			payload.data = write_buf;
+
+			ret = dal_ddc_submit_aux_command(ddc, &payload);
+			read_available = ret;
+		}
 
-		if (!ret)
-			return false;
+		if (read_size != 0 && read_available) {
+			payload.write = false;
+			payload.mot = false;
+			payload.length = read_size;
+			payload.data = read_buf;
 
-		ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+			ret = dal_ddc_submit_aux_command(ddc, &payload);
+		}
 	} else {
 		struct i2c_payloads *payloads =
 				dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -573,6 +586,41 @@ bool dal_ddc_service_query_ddc_data(
 	return ret;
 }
 
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+		struct aux_payload *payload)
+{
+	uint32_t retrieved = 0;
+	bool ret = 0;
+
+	if (!ddc)
+		return false;
+
+	if (!payload)
+		return false;
+
+	do {
+		struct aux_payload current_payload;
+		bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+			payload->length ? true : false;
+
+		current_payload.address = payload->address;
+		current_payload.data = &payload->data[retrieved];
+		current_payload.defer_delay = payload->defer_delay;
+		current_payload.i2c_over_aux = payload->i2c_over_aux;
+		current_payload.length = is_end_of_payload ?
+			payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+		current_payload.mot = !is_end_of_payload;
+		current_payload.reply = payload->reply;
+		current_payload.write = payload->write;
+
+		ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+		retrieved += current_payload.length;
+	} while (retrieved < payload->length && ret == true);
+
+	return ret;
+}
+
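The loop in dal_ddc_submit_aux_command() above encodes the native AUX splitting rule: a payload longer than one transaction (DEFAULT_AUX_MAX_DATA_SIZE bytes, 16 for DP AUX) goes out as consecutive chunks, with MOT (middle-of-transaction) held on every chunk except the last. A minimal standalone sketch of just that rule; the helper name and sizes here are assumptions for illustration, not part of the patch:

#include <stdbool.h>
#include <stdint.h>

/* Walk a payload in AUX-sized chunks; mot stays true on every chunk
 * except the final one, mirroring dal_ddc_submit_aux_command().
 * Assumes max_chunk > 0. */
static void aux_chunk_walk(uint32_t length, uint32_t max_chunk)
{
	uint32_t sent = 0;

	while (sent < length) {
		bool last = (sent + max_chunk) >= length;
		uint32_t chunk = last ? (length - sent) : max_chunk;

		/* transfer bytes [sent, sent + chunk) with mot = !last */
		sent += chunk;
	}
}
/* e.g. length = 40, max_chunk = 16 -> chunks of 16 (MOT), 16 (MOT), 8 (end) */

 /* dc_link_aux_transfer_raw() - Attempt to transfer
  * the given aux payload. This function does not perform
  * retries or handle error states.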
The reply is returned @@ -601,6 +649,19 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, return dce_aux_transfer_with_retries(ddc, payload); } + +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout) +{ + uint32_t prev_timeout = 0; + struct ddc *ddc_pin = ddc->ddc_pin; + + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) + prev_timeout = + ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); + return prev_timeout; +} + /*test only function*/ void dal_ddc_service_set_ddc_pin( struct ddc_service *ddc_service, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f5742719b5d9..42aa889fd0f5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -4,12 +4,8 @@ #include "dc_link_dp.h" #include "dm_helpers.h" #include "opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "resource.h" -#endif #include "inc/core_types.h" #include "link_hwss.h" @@ -21,6 +17,9 @@ #define DC_LOGGER \ link->ctx->logger + +#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 + /* maximum pre emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, @@ -221,19 +220,31 @@ static enum dpcd_training_patterns return dpcd_tr_pattern; } +static inline bool is_repeater(struct dc_link *link, uint32_t offset) +{ + return (!link->is_lttpr_mode_transparent && offset != 0); +} + static void dpcd_set_lt_pattern_and_lane_settings( struct dc_link *link, const struct link_training_settings *lt_settings, - enum dc_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern, + uint32_t offset) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; - const uint32_t dpcd_base_lt_offset = - DP_TRAINING_PATTERN_SET; + + uint32_t dpcd_base_lt_offset; + uint8_t dpcd_lt_buffer[5] = {0}; union dpcd_training_pattern dpcd_pattern = { {0} }; uint32_t lane; uint32_t size_in_bytes; bool edp_workaround = false; /* TODO link_prop.INTERNAL */ + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + + if (is_repeater(link, offset)) + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); /***************************************************************** * DpcdAddress_TrainingPatternSet @@ -241,14 +252,21 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); - dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset] + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; - DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", - __func__, - DP_TRAINING_PATTERN_SET, - dpcd_pattern.v1_4.TRAINING_PATTERN_SET); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } /***************************************************************** * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set *****************************************************************/ @@ 
-268,24 +286,35 @@ static void dpcd_set_lt_pattern_and_lane_settings( PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); } - /* concatinate everything into one buffer*/ + /* concatenate everything into one buffer*/ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]); // 0x00103 - 0x00102 memmove( - &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset], + &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], dpcd_lane, size_in_bytes); - DC_LOG_HW_LINK_TRAINING("%s:\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - DP_TRAINING_LANE0_SET, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); - + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is * causing issues on some eDP panels (EPR#366724) @@ -495,8 +524,12 @@ static void get_lane_status_and_drive_settings( const struct link_training_settings *link_training_setting, union lane_status *ln_status, union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings) + struct link_training_settings *req_settings, + uint32_t offset) { + unsigned int lane01_status_address = DP_LANE0_1_STATUS; + uint8_t lane_adjust_offset = 4; + unsigned int lane01_adjust_address; uint8_t dpcd_buf[6] = {0}; union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; struct link_training_settings request_settings = { {0} }; @@ -504,9 +537,16 @@ static void get_lane_status_and_drive_settings( memset(req_settings, '\0', sizeof(struct link_training_settings)); + if (is_repeater(link, offset)) { + lane01_status_address = + DP_LANE0_1_STATUS_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + lane_adjust_offset = 3; + } + core_link_read_dpcd( link, - DP_LANE0_1_STATUS, + lane01_status_address, (uint8_t *)(dpcd_buf), sizeof(dpcd_buf)); @@ -517,22 +557,47 @@ static void get_lane_status_and_drive_settings( ln_status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); dpcd_lane_adjust[lane].raw = - get_nibble_at_index(&dpcd_buf[4], lane); + get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); } ln_status_updated->raw = dpcd_buf[2]; - DC_LOG_HW_LINK_TRAINING("%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ", - __func__, - DP_LANE0_1_STATUS, dpcd_buf[0], - DP_LANE2_3_STATUS, dpcd_buf[1]); - - DC_LOG_HW_LINK_TRAINING("%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n", - __func__, - DP_ADJUST_REQUEST_LANE0_1, - dpcd_buf[4], - DP_ADJUST_REQUEST_LANE2_3, - dpcd_buf[5]); + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + offset, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, 
dpcd_buf[1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + } + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + + if (is_repeater(link, offset)) + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + offset, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } /*copy to req_settings*/ request_settings.link_settings.lane_count = @@ -571,10 +636,18 @@ static void get_lane_status_and_drive_settings( static void dpcd_set_lane_settings( struct dc_link *link, - const struct link_training_settings *link_training_setting) + const struct link_training_settings *link_training_setting, + uint32_t offset) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}}; uint32_t lane; + unsigned int lane0_set_address; + + lane0_set_address = DP_TRAINING_LANE0_SET; + + if (is_repeater(link, offset)) + lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); for (lane = 0; lane < (uint32_t)(link_training_setting-> @@ -597,7 +670,7 @@ static void dpcd_set_lane_settings( } core_link_write_dpcd(link, - DP_TRAINING_LANE0_SET, + lane0_set_address, (uint8_t *)(dpcd_lane), link_training_setting->link_settings.lane_count); @@ -620,14 +693,26 @@ static void dpcd_set_lane_settings( } */ - DC_LOG_HW_LINK_TRAINING("%s\n %x VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - DP_TRAINING_LANE0_SET, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + if (is_repeater(link, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + lane0_set_address, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + lane0_set_address, + dpcd_lane[0].bits.VOLTAGE_SWING_SET, + dpcd_lane[0].bits.PRE_EMPHASIS_SET, + dpcd_lane[0].bits.MAX_SWING_REACHED, + dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + } link->cur_lane_setting = link_training_setting->lane_settings[0]; } @@ -647,17 +732,6 @@ static bool is_max_vs_reached( } -void dc_link_dp_set_drive_settings( - struct dc_link *link, - struct link_training_settings *lt_settings) -{ - /* program ASIC PHY settings*/ - dp_set_hw_lane_settings(link, lt_settings); - - /* Notify DP sink the PHY settings from source */ - dpcd_set_lane_settings(link, lt_settings); -} - static bool perform_post_lt_adj_req_sequence( struct dc_link *link, struct link_training_settings *lt_settings) @@ -690,7 +764,8 @@ static bool 
perform_post_lt_adj_req_sequence(
 			lt_settings,
 			dpcd_lane_status,
 			&dpcd_lane_status_updated,
-			&req_settings);
+			&req_settings,
+			DPRX);
 
 		if (dpcd_lane_status_updated.bits.
 				POST_LT_ADJ_REQ_IN_PROGRESS == 0)
@@ -747,6 +822,31 @@ static bool perform_post_lt_adj_req_sequence(
 }
 
+/* Only used for channel equalization */
+static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
+{
+	unsigned int aux_rd_interval_us = 400;
+
+	switch (dpcd_aux_read_interval) {
+	case 0x01:
+		aux_rd_interval_us = 4000;
+		break;
+	case 0x02:
+		aux_rd_interval_us = 8000;
+		break;
+	case 0x03:
+		aux_rd_interval_us = 12000;
+		break;
+	case 0x04:
+		aux_rd_interval_us = 16000;
+		break;
+	default:
+		break;
+	}
+
+	return aux_rd_interval_us;
+}
+
 static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
 		union lane_status *dpcd_lane_status)
 {
@@ -765,37 +865,55 @@ static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
 static enum link_training_result perform_channel_equalization_sequence(
 	struct dc_link *link,
-	struct link_training_settings *lt_settings)
+	struct link_training_settings *lt_settings,
+	uint32_t offset)
 {
 	struct link_training_settings req_settings;
 	enum dc_dp_training_pattern tr_pattern;
 	uint32_t retries_ch_eq;
+	uint32_t wait_time_microsec;
 	enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
 	union lane_align_status_updated dpcd_lane_status_updated = { {0} };
 	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
 
+	/* Note: also check that TPS4 is a supported feature*/
+
 	tr_pattern = lt_settings->pattern_for_eq;
 
-	dp_set_hw_training_pattern(link, tr_pattern);
+	if (is_repeater(link, offset))
+		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+	dp_set_hw_training_pattern(link, tr_pattern, offset);
 
 	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
 		retries_ch_eq++) {
 
-		dp_set_hw_lane_settings(link, lt_settings);
+		dp_set_hw_lane_settings(link, lt_settings, offset);
 
 		/* 2. update DPCD*/
 		if (!retries_ch_eq)
 			/* EPR #361076 - write as a 5-byte burst,
-			 * but only for the 1-st iteration*/
+			 * but only for the 1-st iteration
+			 */
+
 			dpcd_set_lt_pattern_and_lane_settings(
 				link,
 				lt_settings,
-				tr_pattern);
+				tr_pattern, offset);
 		else
-			dpcd_set_lane_settings(link, lt_settings);
+			dpcd_set_lane_settings(link, lt_settings, offset);
 
 		/* 3. wait for receiver to lock-on*/
-		wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time);
+		wait_time_microsec = lt_settings->eq_pattern_time;
+
+		if (is_repeater(link, offset))
+			wait_time_microsec =
+					translate_training_aux_read_interval(
+						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+		wait_for_training_aux_rd_interval(
+				link,
+				wait_time_microsec);
 
 		/* 4. Read lane status and requested
 		 * drive settings as set by the sink*/
@@ -805,7 +923,8 @@ static enum link_training_result perform_channel_equalization_sequence(
 			lt_settings,
 			dpcd_lane_status,
 			&dpcd_lane_status_updated,
-			&req_settings);
+			&req_settings,
+			offset);
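For reference, the DPCD TRAINING_AUX_RD_INTERVAL encoding decoded above follows DP 1.4: code 0x00 means 400 us, and 0x01 through 0x04 mean 4, 8, 12 and 16 ms. A table-driven sketch equivalent to the switch (illustration only; helper and table names are assumed):

#include <stdint.h>

/* DPCD TRAINING_AUX_RD_INTERVAL code -> microseconds; index 0 is the
 * 400 us default, and out-of-range codes fall back to it as well. */
static const uint32_t aux_rd_interval_us_table[] = {
	400, 4000, 8000, 12000, 16000
};

static uint32_t aux_rd_interval_us(uint8_t dpcd_code)
{
	return (dpcd_code <= 0x04) ? aux_rd_interval_us_table[dpcd_code] : 400;
}

		/* 5.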
check CR done*/ if (!is_cr_done(lane_count, dpcd_lane_status)) @@ -824,13 +943,16 @@ static enum link_training_result perform_channel_equalization_sequence( return LINK_TRAINING_EQ_FAIL_EQ; } +#define TRAINING_AUX_RD_INTERVAL 100 //us static enum link_training_result perform_clock_recovery_sequence( struct dc_link *link, - struct link_training_settings *lt_settings) + struct link_training_settings *lt_settings, + uint32_t offset) { uint32_t retries_cr; uint32_t retry_count; + uint32_t wait_time_microsec; struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1; @@ -840,7 +962,7 @@ static enum link_training_result perform_clock_recovery_sequence( retries_cr = 0; retry_count = 0; - dp_set_hw_training_pattern(link, tr_pattern); + dp_set_hw_training_pattern(link, tr_pattern, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -857,7 +979,8 @@ static enum link_training_result perform_clock_recovery_sequence( /* 1. call HWSS to set lane settings*/ dp_set_hw_lane_settings( link, - lt_settings); + lt_settings, + offset); /* 2. update DPCD of the receiver*/ if (!retries_cr) @@ -866,16 +989,23 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - tr_pattern); + tr_pattern, + offset); else dpcd_set_lane_settings( link, - lt_settings); + lt_settings, + offset); /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + if (!link->is_lttpr_mode_transparent) + wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + wait_for_training_aux_rd_interval( link, - lt_settings->cr_pattern_time); + wait_time_microsec); /* 4. Read lane status and requested drive * settings as set by the sink @@ -885,7 +1015,8 @@ static enum link_training_result perform_clock_recovery_sequence( lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings); + &req_settings, + offset); /* 5. 
check CR done*/ if (is_cr_done(lane_count, dpcd_lane_status)) @@ -1054,6 +1185,93 @@ static void initialize_training_settings( lt_settings->enhanced_framing = 1; } +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +static void configure_lttpr_mode(struct dc_link *link) +{ + /* aux timeout is already set to extended */ + /* RESET/SET lttpr mode to enable non transparent mode */ + uint8_t repeater_cnt; + uint32_t aux_interval_address; + uint8_t repeater_id; + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (!link->is_lttpr_mode_transparent) { + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; + core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } + } +} + +static void repeater_training_done(struct dc_link *link, uint32_t offset) +{ + union dpcd_training_pattern dpcd_pattern = { {0} }; + + const uint32_t dpcd_base_lt_offset = + DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + /* Set training not in progress*/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + static void print_status_message( struct dc_link *link, const struct link_training_settings *lt_settings, @@ -1133,6 +1351,17 @@ static void print_status_message( lt_spread); } +void dc_link_dp_set_drive_settings( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + /* program ASIC PHY settings*/ + dp_set_hw_lane_settings(link, lt_settings, DPRX); + + /* Notify DP sink the PHY settings from source */ + dpcd_set_lane_settings(link, lt_settings, DPRX); +} + bool dc_link_dp_perform_link_training_skip_aux( struct dc_link *link, const struct dc_link_settings *link_setting) @@ -1149,10 +1378,10 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 1. Perform_clock_recovery_sequence. 
	 */
 
 	/* transmit training pattern for clock recovery */
-	dp_set_hw_training_pattern(link, pattern_for_cr);
+	dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
 
 	/* call HWSS to set lane settings*/
-	dp_set_hw_lane_settings(link, &lt_settings);
+	dp_set_hw_lane_settings(link, &lt_settings, DPRX);
 
 	/* wait receiver to lock-on*/
 	wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
@@ -1160,10 +1389,10 @@ bool dc_link_dp_perform_link_training_skip_aux(
 	/* 2. Perform_channel_equalization_sequence. */
 
 	/* transmit training pattern for channel equalization. */
-	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq);
+	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX);
 
 	/* call HWSS to set lane settings*/
-	dp_set_hw_lane_settings(link, &lt_settings);
+	dp_set_hw_lane_settings(link, &lt_settings, DPRX);
 
 	/* wait receiver to lock-on. */
 	wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
@@ -1185,9 +1414,10 @@ enum link_training_result dc_link_dp_perform_link_training(
 {
 	enum link_training_result status = LINK_TRAINING_SUCCESS;
 	struct link_training_settings lt_settings;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+
 	bool fec_enable;
-#endif
+	uint8_t repeater_cnt;
+	uint8_t repeater_id;
 
 	initialize_training_settings(
 			link,
@@ -1198,23 +1428,47 @@ enum link_training_result dc_link_dp_perform_link_training(
 	/* 1. set link rate, lane count and spread. */
 	dpcd_set_link_settings(link, &lt_settings);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (link->preferred_training_settings.fec_enable != NULL)
 		fec_enable = *link->preferred_training_settings.fec_enable;
 	else
 		fec_enable = true;
 
 	dp_set_fec_ready(link, fec_enable);
-#endif
+
+	if (!link->is_lttpr_mode_transparent) {
+		/* Configure lttpr mode */
+		configure_lttpr_mode(link);
+
+		/* 2. perform link training (set link training done
+		 * to false is done as well)
+		 */
+		repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+				repeater_id--) {
+			status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id);
+
+			if (status != LINK_TRAINING_SUCCESS)
+				break;
+
+			status = perform_channel_equalization_sequence(link,
+					&lt_settings,
+					repeater_id);
+
+			if (status != LINK_TRAINING_SUCCESS)
+				break;
+
+			repeater_training_done(link, repeater_id);
+		}
+	}
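The PHY_REPEATER_CNT value decoded by convert_to_count() above is one-hot: 0x80 means one LTTPR, 0x40 two, down to 0x01 for eight. An equivalent closed-form decode, as a sketch only (helper name assumed; uses the GCC/Clang __builtin_ctz builtin for the bit index):

#include <stdint.h>

static uint8_t lttpr_count_from_dpcd(uint8_t phy_repeater_cnt)
{
	/* reject zero and non-one-hot encodings, as the switch does */
	if (phy_repeater_cnt == 0 ||
	    (phy_repeater_cnt & (phy_repeater_cnt - 1)) != 0)
		return 0;

	/* one-hot bit 7..0 maps to a count of 1..8 */
	return (uint8_t)(8 - __builtin_ctz(phy_repeater_cnt));
}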
-	/* 2. perform link training (set link training done
-	 * to false is done as well)
-	 */
-	status = perform_clock_recovery_sequence(link, &lt_settings);
+	if (status == LINK_TRAINING_SUCCESS) {
+		status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
 
 	if (status == LINK_TRAINING_SUCCESS) {
 		status = perform_channel_equalization_sequence(link,
-				&lt_settings);
+				&lt_settings,
+				DPRX);
+		}
 	}
 
 	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
@@ -1233,23 +1487,58 @@
 }
 
 bool perform_link_training_with_retries(
-	struct dc_link *link,
 	const struct dc_link_settings *link_setting,
 	bool skip_video_pattern,
-	int attempts)
+	int attempts,
+	struct pipe_ctx *pipe_ctx,
+	enum signal_type signal)
 {
 	uint8_t j;
 	uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dc_link *link = stream->link;
+	enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
 
 	for (j = 0; j < attempts; ++j) {
 
-		if (dc_link_dp_perform_link_training(
+		dp_enable_link_phy(
+			link,
+			signal,
+			pipe_ctx->clock_source->id,
+			link_setting);
+
+		if (stream->sink_patches.dppowerup_delay > 0) {
+			int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+			msleep(delay_dp_power_up_in_ms);
+		}
+
+		dp_set_panel_mode(link, panel_mode);
+
+		/* We need to do this before the link training to ensure the idle pattern in SST
+		 * mode will be sent right after the link training
+		 */
+		link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+							pipe_ctx->stream_res.stream_enc->id, true);
+
+		if (link->aux_access_disabled) {
+			dc_link_dp_perform_link_training_skip_aux(link, link_setting);
+			return true;
+		} else if (dc_link_dp_perform_link_training(
 				link,
 				link_setting,
 				skip_video_pattern) == LINK_TRAINING_SUCCESS)
 			return true;
 
+		/* the latest link training still fails; skip the delay and keep the PHY on
+		 */
+		if (j == (attempts - 1))
+			break;
+
+		dp_disable_link_phy(link, signal);
+
 		msleep(delay_between_attempts);
+
 		delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
 	}
 
@@ -1321,9 +1610,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
 	enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
 	enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	bool fec_enable = false;
-#endif
 
 	initialize_training_settings(
 		link,
@@ -1343,11 +1630,9 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	dp_enable_link_phy(link, link->connector_signal,
 		dp_cs_id, link_settings);
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	/* Set FEC enable */
 	fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
 	dp_set_fec_ready(link, fec_enable);
-#endif
 
 	if (lt_overrides->alternate_scrambler_reset) {
 		if (*lt_overrides->alternate_scrambler_reset)
@@ -1367,10 +1652,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	/* 2. perform link training (set link training done
 	 * to false is done as well)
 	 */
-	lt_status = perform_clock_recovery_sequence(link, &lt_settings);
+	lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
 	if (lt_status == LINK_TRAINING_SUCCESS) {
 		lt_status = perform_channel_equalization_sequence(link,
-						&lt_settings);
+						&lt_settings,
+						DPRX);
 	}
 
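Between failed attempts, perform_link_training_with_retries() above sleeps for a linearly growing interval (LINK_TRAINING_RETRY_DELAY ms, then twice that, and so on) and skips both the sleep and the PHY teardown after the final attempt. A compact sketch of that pacing with assumed names, not the driver's code:

/* Retry pacing: linear backoff in ms, no sleep after the last try. */
static void retry_pacing(int attempts, int base_delay_ms)
{
	int delay_ms = base_delay_ms;

	for (int j = 0; j < attempts; ++j) {
		/* ... bring up the PHY and train; return on success ... */

		if (j == attempts - 1)
			break;		/* last failure: keep PHY on, no sleep */

		/* tear down the PHY, then sleep delay_ms */
		delay_ms += base_delay_ms;
	}
}

	/* 3.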
Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/ @@ -1387,9 +1673,7 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) */ if (link_down == true) { dp_disable_link_phy(link, link->connector_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT dp_set_fec_ready(link, false); -#endif } link->sync_lt_in_progress = false; @@ -1409,6 +1693,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; + if (link->link_enc->funcs->get_max_link_cap) + link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) max_link_cap.lane_count = @@ -1420,6 +1707,22 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) max_link_cap.link_spread) max_link_cap.link_spread = link->reported_link_cap.link_spread; + /* + * account for lttpr repeaters cap + * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). + */ + if (!link->is_lttpr_mode_transparent) { + if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + + if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + + DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", + __func__, + max_link_cap.lane_count, + max_link_cap.link_rate); + } return max_link_cap; } @@ -1565,6 +1868,13 @@ bool dp_verify_link_cap( max_link_cap = get_max_link_cap(link); + /* Grant extended timeout request */ + if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { + uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + + core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); + } + /* TODO implement override and monitor patch later */ /* try to train the link from high to low to @@ -1653,11 +1963,14 @@ bool dp_verify_link_cap_with_retries( for (i = 0; i < attempts; i++) { int fail_count = 0; - enum dc_connection_type type; + enum dc_connection_type type = dc_connection_none; memset(&link->verified_link_cap, 0, sizeof(struct dc_link_settings)); - if (!dc_link_detect_sink(link, &type)) { + if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { + link->verified_link_cap.lane_count = LANE_COUNT_ONE; + link->verified_link_cap.link_rate = LINK_RATE_LOW; + link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; break; } else if (dp_verify_link_cap(link, &link->reported_link_cap, @@ -1670,6 +1983,19 @@ bool dp_verify_link_cap_with_retries( return success; } +bool dp_verify_mst_link_cap( + struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + + max_link_cap = get_max_link_cap(link); + link->verified_link_cap = get_common_supported_link_settings( + link->reported_link_cap, + max_link_cap); + + return true; +} + static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b) @@ -2057,11 +2383,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link) return false; } -static bool handle_hpd_irq_psr_sink(const struct dc_link *link) +static bool handle_hpd_irq_psr_sink(struct dc_link *link) { union dpcd_psr_configuration psr_configuration; - if (!link->psr_enabled) + if 
(!link->psr_feature_enabled) return false; dm_helpers_dp_read_dpcd( @@ -2100,8 +2426,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - dc_link_set_psr_enable(link, false, true); - dc_link_set_psr_enable(link, true, true); + dc_link_set_psr_allow_active(link, false, true); + dc_link_set_psr_allow_active(link, true, true); return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -2261,6 +2587,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) dc_link_dp_set_test_pattern( link, test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, &link_training_settings, test_80_bit_pattern, (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - @@ -2272,6 +2599,8 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) union link_test_pattern dpcd_test_pattern; union test_misc dpcd_test_params; enum dp_test_pattern test_pattern; + enum dp_test_pattern_color_space test_pattern_color_space = + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); @@ -2306,14 +2635,105 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) break; } + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + dc_link_dp_set_test_pattern( link, test_pattern, + test_pattern_color_space, NULL, NULL, 0); } +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ + union audio_test_mode dpcd_test_mode = {0}; + struct audio_test_pattern_type dpcd_pattern_type = {0}; + union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = &pipes[0]; + unsigned int channel_count; + unsigned int channel = 0; + unsigned int modes = 0; + unsigned int sampling_rate_in_hz = 0; + + // get audio test mode and test pattern parameters + core_link_read_dpcd( + link, + DP_TEST_AUDIO_MODE, + &dpcd_test_mode.raw, + sizeof(dpcd_test_mode)); + + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PATTERN_TYPE, + &dpcd_pattern_type.value, + sizeof(dpcd_pattern_type)); + + channel_count = dpcd_test_mode.bits.channel_count + 1; + + // read pattern periods for requested channels when sawTooth pattern is requested + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || + dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + + test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? 
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + // read period for each channel + for (channel = 0; channel < channel_count; channel++) { + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PERIOD_CH1 + channel, + &dpcd_pattern_period[channel].raw, + sizeof(dpcd_pattern_period[channel])); + } + } + + // translate sampling rate + switch (dpcd_test_mode.bits.sampling_rate) { + case AUDIO_SAMPLING_RATE_32KHZ: + sampling_rate_in_hz = 32000; + break; + case AUDIO_SAMPLING_RATE_44_1KHZ: + sampling_rate_in_hz = 44100; + break; + case AUDIO_SAMPLING_RATE_48KHZ: + sampling_rate_in_hz = 48000; + break; + case AUDIO_SAMPLING_RATE_88_2KHZ: + sampling_rate_in_hz = 88200; + break; + case AUDIO_SAMPLING_RATE_96KHZ: + sampling_rate_in_hz = 96000; + break; + case AUDIO_SAMPLING_RATE_176_4KHZ: + sampling_rate_in_hz = 176400; + break; + case AUDIO_SAMPLING_RATE_192KHZ: + sampling_rate_in_hz = 192000; + break; + default: + sampling_rate_in_hz = 0; + break; + } + + link->audio_test_data.flags.test_requested = 1; + link->audio_test_data.flags.disable_video = disable_video; + link->audio_test_data.sampling_rate = sampling_rate_in_hz; + link->audio_test_data.channel_count = channel_count; + link->audio_test_data.pattern_type = test_pattern; + + if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { + for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { + link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; + } + } +} + static void handle_automated_test(struct dc_link *link) { union test_request test_request; @@ -2343,6 +2763,12 @@ static void handle_automated_test(struct dc_link *link) dp_test_send_link_test_pattern(link); test_response.bits.ACK = 1; } + + if (test_request.bits.AUDIO_TEST_PATTERN) { + dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); + test_response.bits.ACK = 1; + } + if (test_request.bits.PHY_TEST_PATTERN) { dp_test_send_phy_test_pattern(link); test_response.bits.ACK = 1; @@ -2362,8 +2788,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } }; union device_service_irq device_service_clear = { { 0 } }; enum dc_status result; - bool status = false; + struct pipe_ctx *pipe_ctx; + struct dc_link_settings previous_link_settings; + int i; if (out_link_loss) *out_link_loss = false; @@ -2436,9 +2864,28 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd sizeof(hpd_irq_dpcd_data), "Status: "); - perform_link_training_with_retries(link, - &link->cur_link_settings, - true, LINK_TRAINING_ATTEMPTS); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + break; + } + + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return false; + + previous_link_settings = link->cur_link_settings; + dp_disable_link_phy(link, pipe_ctx->stream->signal); + + perform_link_training_with_retries(&previous_link_settings, + true, LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal); + + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_link_allocate_mst_payload(pipe_ctx); + } status = false; if (out_link_loss) @@ -2545,6 +2992,7 @@ static void get_active_converter_info( uint8_t data, struct dc_link *link) { union dp_downstream_port_present ds_port 
= { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); /* decode converter info*/ if (!ds_port.fields.PORT_PRESENT) { @@ -2666,7 +3114,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, int length) { int retry = 0; - union dp_downstream_port_present ds_port = { 0 }; if (!link->dpcd_caps.dpcd_rev.raw) { do { @@ -2679,9 +3126,6 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); } - ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - - DP_DPCD_REV]; - if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { switch (link->dpcd_caps.branch_dev_id) { /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down @@ -2691,6 +3135,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, * keep receiver powered all the time.*/ case DP_BRANCH_DEVICE_ID_0010FA: case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: link->wa_flags.dp_keep_receiver_powered = true; break; @@ -2705,7 +3150,11 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, static bool retrieve_link_cap(struct dc_link *link) { - uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1]; + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + uint8_t lttpr_dpcd_data[6]; /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. */ @@ -2721,7 +3170,19 @@ static bool retrieve_link_cap(struct dc_link *link) int i; struct dp_sink_hw_fw_revision dp_hw_fw_revision; + /* Set default timeout to 3.2ms and read LTTPR capabilities */ + bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support && + !link->dc->config.disable_extended_timeout_support; + + link->is_lttpr_mode_transparent = true; + + if (ext_timeout_support) { + dc_link_aux_configure_timeout(link->ddc, + LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD); + } + memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); memset(&down_strm_port_count, '\0', sizeof(union down_stream_port_count)); memset(&edp_config_cap, '\0', @@ -2753,6 +3214,52 @@ static bool retrieve_link_cap(struct dc_link *link) return false; } + if (ext_timeout_support) { + + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + lttpr_dpcd_data, + sizeof(lttpr_dpcd_data)); + + link->dpcd_caps.lttpr_caps.revision.raw = + lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_link_rate = + lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = + lttpr_dpcd_data[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_lane_count = + lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.mode = + lttpr_dpcd_data[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_ext_timeout = + lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + if 
(link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
+			link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+			link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+			link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) {
+			link->is_lttpr_mode_transparent = false;
+		} else {
+			/* No LTTPR present; reset the AUX timeout to its default value */
+			link->is_lttpr_mode_transparent = true;
+			dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+		}
+
+		CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+	}
+
 	{
 		union training_aux_rd_interval aux_rd_interval;
@@ -2891,7 +3398,6 @@
 		dp_hw_fw_revision.ieee_fw_rev,
 		sizeof(dp_hw_fw_revision.ieee_fw_rev));
 
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	memset(&link->dpcd_caps.dsc_caps, '\0',
 			sizeof(link->dpcd_caps.dsc_caps));
 	memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -2913,7 +3419,6 @@ static bool retrieve_link_cap(struct dc_link *link)
 				link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
 				sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
 	}
-#endif
 
 	/* Connectivity log: detection */
 	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -3035,21 +3540,20 @@ static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
 
 static void set_crtc_test_pattern(struct dc_link *link,
 		struct pipe_ctx *pipe_ctx,
-		enum dp_test_pattern test_pattern)
+		enum dp_test_pattern test_pattern,
+		enum dp_test_pattern_color_space test_pattern_color_space)
 {
 	enum controller_dp_test_pattern controller_test_pattern;
 	enum dc_color_depth color_depth = pipe_ctx->
 		stream->timing.display_color_depth;
 	struct bit_depth_reduction_params params;
 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 	int width = pipe_ctx->stream->timing.h_addressable +
 		pipe_ctx->stream->timing.h_border_left +
 		pipe_ctx->stream->timing.h_border_right;
 	int height = pipe_ctx->stream->timing.v_addressable +
 		pipe_ctx->stream->timing.v_border_bottom +
 		pipe_ctx->stream->timing.v_border_top;
-#endif
 
 	memset(&params, 0, sizeof(params));
 
@@ -3093,11 +3597,29 @@ static void set_crtc_test_pattern(struct dc_link *link,
 		if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
 			pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
 					controller_test_pattern, color_depth);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 		else if (opp->funcs->opp_set_disp_pattern_generator) {
 			struct pipe_ctx *odm_pipe;
+			enum controller_dp_color_space controller_color_space;
 			int opp_cnt = 1;
 
+			switch (test_pattern_color_space) {
+			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+				break;
+			case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+			default:
+				controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+				DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+				ASSERT(0);
+				break;
+			}
+
 			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 				opp_cnt++;
@@ -3109,6 +3631,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
 				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
 				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
 					controller_test_pattern,
+					controller_color_space,
 					color_depth,
 					NULL,
 					width,
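The capability check above is what flips a link out of transparent LTTPR mode. A condensed restatement of the same predicate, as a standalone sketch (field values passed in; the helper name is assumed):

#include <stdbool.h>
#include <stdint.h>

/* Non-transparent LTTPR mode needs: at least one repeater reported,
 * a plausible lane count (1..4), and an LTTPR field-data-structure
 * revision of at least 1.4 (raw value 0x14). */
static bool lttpr_non_transparent_ok(uint8_t repeater_cnt,
				     uint8_t max_lane_count,
				     uint8_t revision_raw)
{
	return repeater_cnt > 0 &&
	       max_lane_count > 0 && max_lane_count <= 4 &&
	       revision_raw >= 0x14;
}

@@ -3116,12 +3639,12 @@ static void set_crtc_test_pattern(struct dc_link *link,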
set_crtc_test_pattern(struct dc_link *link, } opp->funcs->opp_set_disp_pattern_generator(opp, controller_test_pattern, + controller_color_space, color_depth, NULL, width, height); } -#endif } break; case DP_TEST_PATTERN_VIDEO_MODE: { @@ -3134,7 +3657,6 @@ static void set_crtc_test_pattern(struct dc_link *link, pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, color_depth); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -3149,6 +3671,7 @@ static void set_crtc_test_pattern(struct dc_link *link, odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, width, @@ -3156,12 +3679,12 @@ static void set_crtc_test_pattern(struct dc_link *link, } opp->funcs->opp_set_disp_pattern_generator(opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, width, height); } -#endif } break; @@ -3173,6 +3696,7 @@ static void set_crtc_test_pattern(struct dc_link *link, bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size) @@ -3201,7 +3725,7 @@ bool dc_link_dp_set_test_pattern( if (link->test_pattern_enabled && test_pattern == DP_TEST_PATTERN_VIDEO_MODE) { /* Set CRTC Test Pattern */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern); + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); dp_set_hw_test_pattern(link, test_pattern, (uint8_t *)p_custom_pattern, (uint32_t)cust_pattern_size); @@ -3224,8 +3748,8 @@ bool dc_link_dp_set_test_pattern( if (is_dp_phy_pattern(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { - dp_set_hw_lane_settings(link, p_link_settings); - dpcd_set_lane_settings(link, p_link_settings); + dp_set_hw_lane_settings(link, p_link_settings, DPRX); + dpcd_set_lane_settings(link, p_link_settings, DPRX); } /* Blank stream if running test pattern */ @@ -3316,7 +3840,7 @@ bool dc_link_dp_set_test_pattern( } } else { /* CRTC Patterns */ - set_crtc_test_pattern(link, pipe_ctx, test_pattern); + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); /* Set Test Pattern state */ link->test_pattern_enabled = true; } @@ -3436,7 +3960,6 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) return DP_PANEL_MODE_DEFAULT; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void dp_set_fec_ready(struct dc_link *link, bool ready) { /* FEC has to be "set ready" before the link training. @@ -3490,7 +4013,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) if (link_enc->funcs->fec_set_enable && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { if (link->fec_state == dc_link_fec_ready && enable) { - msleep(1); + /* According to the DP spec, the FEC enable sequence can first + * be transmitted anytime after 1000 LL codes have + * been transmitted on the link after link training + * completion. Using 1 lane RBR should have the maximum + * time for transmitting 1000 LL codes which is 6.173 us. + * So use 7 microseconds delay instead.
+ */ + udelay(7); link_enc->funcs->fec_set_enable(link_enc, true); link->fec_state = dc_link_fec_enabled; } else if (link->fec_state == dc_link_fec_enabled && !enable) { @@ -3499,5 +4029,4 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 79438c4f1e20..548aac02ca11 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -12,12 +12,38 @@ #include "dc_link_ddc.h" #include "dm_helpers.h" #include "dpcd_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "resource.h" -#endif + +static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); +} enum dc_status core_link_read_dpcd( struct dc_link *link, @@ -69,8 +95,8 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings) { struct link_encoder *link_enc = link->link_enc; - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; @@ -174,8 +200,8 @@ bool edp_receiver_ready_T7(struct dc_link *link) void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) { - struct dc *core_dc = link->ctx->dc; - struct dmcu *dmcu = core_dc->res_pool->dmcu; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; if (!link->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(link, false); @@ -212,7 +238,8 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) bool dp_set_hw_training_pattern( struct dc_link *link, - enum dc_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern, + uint32_t offset) { enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; @@ -240,10 +267,14 @@ bool dp_set_hw_training_pattern( void dp_set_hw_lane_settings( struct dc_link *link, - const struct link_training_settings *link_settings) + const struct link_training_settings *link_settings, + uint32_t offset) { struct link_encoder *encoder = link->link_enc; + if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset)) + return; + /* call Encoder to set lane settings */ encoder->funcs->dp_set_lane_settings(encoder, link_settings); } @@ -277,7 +308,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, if (pipes[i].stream != NULL && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL) { + pipes[i].stream_res.stream_enc != NULL && + pipes[i].stream->link == link) { udelay(100); pipes[i].stream_res.stream_enc->funcs->dp_blank( @@ -301,20 +333,12 @@ void dp_retrain_link_dp_test(struct dc_link *link, memset(&link->cur_link_settings, 0, 
sizeof(link->cur_link_settings)); - link->link_enc->funcs->enable_dp_output( - link->link_enc, - link_setting, - pipes[i].clock_source->id); - link->cur_link_settings = *link_setting; - - dp_receiver_power_ctrl(link, true); - perform_link_training_with_retries( - link, link_setting, skip_video_pattern, - LINK_TRAINING_ATTEMPTS); - + LINK_TRAINING_ATTEMPTS, + &pipes[i], + SIGNAL_TYPE_DISPLAY_PORT); link->dc->hwss.enable_stream(&pipes[i]); @@ -338,7 +362,6 @@ void dp_retrain_link_dp_test(struct dc_link *link, } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define DC_LOGGER \ dsc->ctx->logger static void dsc_optc_config_log(struct display_stream_compressor *dsc, @@ -364,14 +387,14 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc, static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) { - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; bool result = false; - if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) result = true; else - result = dm_helpers_dp_write_dsc_enable(core_dc->ctx, stream, enable); + result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); return result; } @@ -381,7 +404,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -417,7 +440,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -442,7 +465,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) OPTC_DSC_DISABLED, 0, 0); /* disable DSC in stream encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -485,7 +508,7 @@ out: bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; if (!pipe_ctx->stream->timing.flags.DSC || !dsc) @@ -504,7 +527,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) DC_LOG_DSC(" "); dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, @@ -513,7 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) } } else { /* disable DSC PPS in stream encoder */ - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, false, NULL); } @@ -536,5 +559,4 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) dp_set_dsc_pps_sdp(pipe_ctx, true); return true; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 8f70295179ff..0c19de678339 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -46,15 +46,11 @@ #include "dce100/dce100_resource.h" #include "dce110/dce110_resource.h" #include "dce112/dce112_resource.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/dcn10_resource.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/dcn20_resource.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/dcn21_resource.h" -#endif #include "dce120/dce120_resource.h" #define DC_LOGGER_INIT(logger) @@ -99,23 +95,19 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) else dc_version = DCE_VERSION_12_0; break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case FAMILY_RV: dc_version = DCN_VERSION_1_0; if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_1_01; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) 
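/* FAMILY_RV covers both Raven (DCN 1.0 / 1.01) and Renoir (DCN 2.1); the
 * Renoir check below no longer needs a separate Kconfig guard now that the
 * per-revision CONFIG_DRM_AMD_DC_DCN* options are folded into a single
 * CONFIG_DRM_AMD_DC_DCN. */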
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_2_1; -#endif break; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case FAMILY_NV: dc_version = DCN_VERSION_2_0; break; -#endif default: dc_version = DCE_VERSION_UNKNOWN; break; @@ -162,20 +154,16 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, init_data->num_virtual_links, dc); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: res_pool = dcn10_create_resource_pool(init_data, dc); break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: res_pool = dcn20_create_resource_pool(init_data, dc); break; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: res_pool = dcn21_create_resource_pool(init_data, dc); break; @@ -404,6 +392,9 @@ bool resource_are_streams_timing_synchronizable( if (stream1->view_format != stream2->view_format) return false; + if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param) + return false; + return true; } static bool is_dp_and_hdmi_sharable( @@ -948,14 +939,33 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } -static bool are_rect_integer_multiples(struct rect src, struct rect dest) + +static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) { - if (dest.width >= src.width && dest.width % src.width == 0 && - dest.height >= src.height && dest.height % src.height == 0) - return true; + unsigned int integer_multiple = 1; - return false; + if (pipe_ctx->plane_state->scaling_quality.integer_scaling) { + // calculate maximum # of replication of src onto addressable + integer_multiple = min( + pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, + pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); + + //scale dst + pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; + pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; + + //center dst onto addressable + pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; + pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; + + //We are guaranteed that we are scaling in integer ratio + pipe_ctx->plane_state->scaling_quality.v_taps = 1; + pipe_ctx->plane_state->scaling_quality.h_taps = 1; + pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; + pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; + } } + bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -969,6 +979,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); + calculate_integer_scaling(pipe_ctx); + calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); @@ -999,13 +1011,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); - if (res && - plane_state->scaling_quality.integer_scaling && - are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport, - pipe_ctx->plane_res.scl_data.recout)) { - pipe_ctx->plane_res.scl_data.taps.v_taps = 1; - pipe_ctx->plane_res.scl_data.taps.h_taps = 1; - } if (!res) { /* 
Try 24 bpp linebuffer */ @@ -1187,7 +1192,7 @@ static struct pipe_ctx *acquire_free_pipe_for_head( return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static int acquire_first_split_pipe( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1268,7 +1273,7 @@ bool dc_add_plane_to_context( free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); - #if defined(CONFIG_DRM_AMD_DC_DCN1_0) + #if defined(CONFIG_DRM_AMD_DC_DCN) if (!free_pipe) { int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); if (pipe_idx >= 0) @@ -1540,6 +1545,9 @@ bool dc_is_stream_unchanged( if (!are_stream_backends_same(old_stream, stream)) return false; + if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) + return false; + return true; } @@ -1629,7 +1637,8 @@ static int acquire_first_free_pipe( static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, - enum engine_id id) + enum engine_id id, + enum dce_version dc_version) { int i, available_audio_count; @@ -1848,31 +1857,31 @@ static int acquire_resource_from_hw_enabled_state( struct dc_stream_state *stream) { struct dc_link *link = stream->link; - unsigned int inst, tg_inst; + unsigned int i, inst, tg_inst = 0; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return -1; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. - */ inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (inst >= pool->pipe_count) + if (inst == ENGINE_ID_UNKNOWN) return -1; - if (inst >= pool->stream_enc_count) - return -1; + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i]->id == inst) { + tg_inst = pool->stream_enc[i]->funcs->dig_source_otg( + pool->stream_enc[i]); + break; + } + } - tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]); + // tg_inst not found + if (i == pool->stream_enc_count) + return -1; if (tg_inst >= pool->timing_generator_count) - return false; + return -1; if (!res_ctx->pipe_ctx[tg_inst].stream) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst]; @@ -1938,7 +1947,7 @@ enum dc_status resource_map_pool_resources( /* acquire new resources */ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN if (pipe_idx < 0) pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); #endif @@ -1965,7 +1974,7 @@ enum dc_status resource_map_pool_resources( dc_is_audio_capable_signal(pipe_ctx->stream->signal) && stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( - &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); /* * Audio assigned in order first come first get. 
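The calculate_integer_scaling() path added above replaces the old taps-based check: when integer_scaling is requested, it scales stream->dst up front to the largest whole multiple of stream->src that fits the addressable timing, centers the result, and forces 1-tap filtering, since pure pixel replication needs no scaler filter. A minimal standalone sketch of that arithmetic (illustrative only; demo_rect is a simplified stand-in for dc's struct rect):

#include <stdio.h>

/* Simplified stand-in for dc's struct rect. */
struct demo_rect { int x, y, width, height; };

static void demo_integer_scaling(const struct demo_rect *src,
				 struct demo_rect *dst,
				 int h_addressable, int v_addressable)
{
	/* Largest whole-number replication of src that fits the timing. */
	int mult_w = h_addressable / src->width;
	int mult_h = v_addressable / src->height;
	int mult = mult_w < mult_h ? mult_w : mult_h;

	dst->width = mult * src->width;
	dst->height = mult * src->height;

	/* Center the integer-scaled destination on the addressable area. */
	dst->x = (h_addressable - dst->width) / 2;
	dst->y = (v_addressable - dst->height) / 2;
}

int main(void)
{
	struct demo_rect src = { 0, 0, 1280, 720 }, dst;

	demo_integer_scaling(&src, &dst, 3840, 2160);
	/* 1280x720 into a 3840x2160 timing: mult = 3, so dst = 3840x2160
	 * at (0,0); a 1920x1200 source would get mult = 1 and be centered
	 * at (960,480) instead. */
	printf("%dx%d at (%d,%d)\n", dst.width, dst.height, dst.x, dst.y);
	return 0;
}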
@@ -2738,9 +2747,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) { - struct dc *core_dc = dc; struct dc_link *link = stream->link; - struct timing_generator *tg = core_dc->res_pool->timing_generators[0]; + struct timing_generator *tg = dc->res_pool->timing_generators[0]; enum dc_status res = DC_OK; calculate_phy_pix_clks(stream); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index 5cbfdf1c4b11..a249a0e5edd0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,7 +33,7 @@ * Private functions ******************************************************************************/ -static void destruct(struct dc_sink *sink) +static void dc_sink_destruct(struct dc_sink *sink) { if (sink->dc_container_id) { kfree(sink->dc_container_id); @@ -41,7 +41,7 @@ static void destruct(struct dc_sink *sink) } } -static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) +static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { struct dc_link *link = init_params->link; @@ -75,7 +75,7 @@ void dc_sink_retain(struct dc_sink *sink) static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); - destruct(sink); + dc_sink_destruct(sink); kfree(sink); } @@ -91,7 +91,7 @@ struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params) if (NULL == sink) goto alloc_fail; - if (false == construct(sink, init_params)) + if (false == dc_sink_construct(sink, init_params)) goto construct_fail; kref_init(&sink->refcount); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bf1d7bb90e0f..b43a4b115fd8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -27,14 +27,12 @@ #include <linux/slab.h> #include "dm_services.h" +#include "basics/dc_common.h" #include "dc.h" #include "core_types.h" #include "resource.h" #include "ipp.h" #include "timing_generator.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) -#include "dcn10/dcn10_hw_sequencer.h" -#endif #define DC_LOGGER dc->ctx->logger @@ -58,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static void construct(struct dc_stream_state *stream, +static void dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -108,7 +106,6 @@ static void construct(struct dc_stream_state *stream, /* EDID CAP translation for HDMI 2.0 */ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg)); stream->timing.dsc_cfg.num_slices_h = 0; stream->timing.dsc_cfg.num_slices_v = 0; @@ -117,7 +114,6 @@ static void construct(struct dc_stream_state *stream, stream->timing.dsc_cfg.linebuf_depth = 9; stream->timing.dsc_cfg.version_minor = 2; stream->timing.dsc_cfg.ycbcr422_simple = 0; -#endif update_stream_signal(stream, dc_sink_data); @@ -129,7 +125,7 @@ static void construct(struct dc_stream_state *stream, stream->ctx->dc_stream_id_count++; } -static void destruct(struct dc_stream_state *stream) +static void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if 
(stream->out_transfer_func != NULL) { @@ -147,7 +143,7 @@ static void dc_stream_free(struct kref *kref) { struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount); - destruct(stream); + dc_stream_destruct(stream); kfree(stream); } @@ -170,7 +166,7 @@ struct dc_stream_state *dc_create_stream_for_sink( if (stream == NULL) return NULL; - construct(stream, sink); + dc_stream_construct(stream, sink); kref_init(&stream->refcount); @@ -237,7 +233,7 @@ struct dc_stream_status *dc_stream_get_status( static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned int vupdate_line; unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos; struct dc_stream_state *stream = pipe_ctx->stream; @@ -246,7 +242,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) if (stream->ctx->asic_id.chip_family == FAMILY_RV && ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { - vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); + vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) return; @@ -272,7 +268,7 @@ bool dc_stream_set_cursor_attributes( const struct dc_cursor_attributes *attributes) { int i; - struct dc *core_dc; + struct dc *dc; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; @@ -290,8 +286,8 @@ bool dc_stream_set_cursor_attributes( return false; } - core_dc = stream->ctx->dc; - res_ctx = &core_dc->current_state->res_ctx; + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; stream->cursor_attributes = *attributes; for (i = 0; i < MAX_PIPES; i++) { @@ -303,17 +299,17 @@ bool dc_stream_set_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - delay_cursor_until_vupdate(pipe_ctx, core_dc); - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + delay_cursor_until_vupdate(pipe_ctx, dc); + dc->hwss.pipe_control_lock(dc, pipe_to_program, true); } - core_dc->hwss.set_cursor_attribute(pipe_ctx); - if (core_dc->hwss.set_cursor_sdr_white_level) - core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } if (pipe_to_program) - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + dc->hwss.pipe_control_lock(dc, pipe_to_program, false); return true; } @@ -323,7 +319,7 @@ bool dc_stream_set_cursor_position( const struct dc_cursor_position *position) { int i; - struct dc *core_dc; + struct dc *dc; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; @@ -337,8 +333,8 @@ bool dc_stream_set_cursor_position( return false; } - core_dc = stream->ctx->dc; - res_ctx = &core_dc->current_state->res_ctx; + dc = stream->ctx->dc; + res_ctx = &dc->current_state->res_ctx; stream->cursor_position = *position; for (i = 0; i < MAX_PIPES; i++) { @@ -354,20 +350,19 @@ bool dc_stream_set_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - delay_cursor_until_vupdate(pipe_ctx, core_dc); - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + delay_cursor_until_vupdate(pipe_ctx, dc); + dc->hwss.pipe_control_lock(dc, pipe_to_program, true); } - core_dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_position(pipe_ctx); } if (pipe_to_program) - core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + 
dc->hwss.pipe_control_lock(dc, pipe_to_program, false); return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info) @@ -423,10 +418,10 @@ bool dc_stream_add_writeback(struct dc *dc, if (dwb->funcs->is_enabled(dwb)) { /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, stream_status, wb_info); + dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); } else { /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, stream_status, wb_info); + dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); } } @@ -480,14 +475,13 @@ bool dc_stream_remove_writeback(struct dc *dc, return true; } -#endif uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) { uint8_t i; - struct dc *core_dc = stream->ctx->dc; + struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = - &core_dc->current_state->res_ctx; + &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -544,9 +538,9 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, { uint8_t i; bool ret = false; - struct dc *core_dc = stream->ctx->dc; + struct dc *dc = stream->ctx->dc; struct resource_context *res_ctx = - &core_dc->current_state->res_ctx; + &dc->current_state->res_ctx; for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -567,10 +561,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, return ret; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) { - bool status = true; struct pipe_ctx *pipe = NULL; int i; @@ -586,8 +578,7 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; - status = dc->hwss.dmdata_status_done(pipe); - return status; + return dc->hwss.dmdata_status_done(pipe); } bool dc_stream_set_dynamic_metadata(struct dc *dc, @@ -630,7 +621,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, return true; } -#endif void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index b9d6a5bd8522..ea1229a3e2b2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -37,7 +37,7 @@ /******************************************************************************* * Private functions ******************************************************************************/ -static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; @@ -50,7 +50,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->in_transfer_func->type = TF_TYPE_BYPASS; plane_state->in_transfer_func->ctx = ctx; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) plane_state->in_shaper_func = dc_create_transfer_func(); if (plane_state->in_shaper_func != NULL) { plane_state->in_shaper_func->type = TF_TYPE_BYPASS; @@ -67,10 +66,9 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->blend_tf->ctx = ctx; } -#endif } -static void destruct(struct dc_plane_state 
*plane_state) +static void dc_plane_destruct(struct dc_plane_state *plane_state) { if (plane_state->gamma_correction != NULL) { dc_gamma_release(&plane_state->gamma_correction); @@ -80,7 +78,6 @@ static void destruct(struct dc_plane_state *plane_state) plane_state->in_transfer_func); plane_state->in_transfer_func = NULL; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (plane_state->in_shaper_func != NULL) { dc_transfer_func_release( plane_state->in_shaper_func); @@ -97,7 +94,6 @@ static void destruct(struct dc_plane_state *plane_state) plane_state->blend_tf = NULL; } -#endif } /******************************************************************************* @@ -112,16 +108,14 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, struct dc_plane_state *dc_create_plane_state(struct dc *dc) { - struct dc *core_dc = dc; - struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), - GFP_KERNEL); + GFP_KERNEL); if (NULL == plane_state) return NULL; kref_init(&plane_state->refcount); - construct(core_dc->ctx, plane_state); + dc_plane_construct(dc->ctx, plane_state); return plane_state; } @@ -141,7 +135,7 @@ const struct dc_plane_status *dc_plane_get_status( const struct dc_plane_state *plane_state) { const struct dc_plane_status *plane_status; - struct dc *core_dc; + struct dc *dc; int i; if (!plane_state || @@ -152,15 +146,15 @@ const struct dc_plane_status *dc_plane_get_status( } plane_status = &plane_state->status; - core_dc = plane_state->ctx->dc; + dc = plane_state->ctx->dc; - if (core_dc->current_state == NULL) + if (dc->current_state == NULL) return NULL; /* Find the current plane state and set its pending bit to false */ - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = - &core_dc->current_state->res_ctx.pipe_ctx[i]; + &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; @@ -170,14 +164,14 @@ const struct dc_plane_status *dc_plane_get_status( break; } - for (i = 0; i < core_dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = - &core_dc->current_state->res_ctx.pipe_ctx[i]; + &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->plane_state != plane_state) continue; - core_dc->hwss.update_pending_status(pipe_ctx); + dc->hwss.update_pending_status(pipe_ctx); } return plane_status; @@ -191,7 +185,7 @@ void dc_plane_state_retain(struct dc_plane_state *plane_state) static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); - destruct(plane_state); + dc_plane_destruct(plane_state); kvfree(plane_state); } @@ -262,7 +256,6 @@ alloc_fail: return NULL; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static void dc_3dlut_func_free(struct kref *kref) { struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount); @@ -296,6 +289,5 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut) { kref_get(&lut->refcount); } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a82352a87808..c24639080371 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.48" +#define DC_VER "3.2.62" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -54,6 +54,10 @@ struct dc_versions { struct dmcu_version dmcu_version; }; +enum dp_protocol_version { + DP_VERSION_1_4, 
+}; + enum dc_plane_type { DC_PLANE_TYPE_INVALID, DC_PLANE_TYPE_DCE_RGB, @@ -111,19 +115,18 @@ struct dc_caps { bool force_dp_tps4_for_cp2520; bool disable_dp_clk_share; bool psp_setup_panel_mode; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + bool extended_aux_timeout_support; + bool dmcub_support; bool hw_3d_lut; -#endif + enum dp_protocol_version max_dp_protocol_version; struct dc_plane_cap planes[MAX_PLANES]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa { bool no_connect_phy_config; bool dedcn20_305_wa; bool skip_clock_update; }; -#endif struct dc_dcc_surface_param { struct dc_size surface_size; @@ -219,7 +222,9 @@ struct dc_config { bool allow_seamless_boot_optimization; bool power_down_display_on_boot; bool edp_not_connected; + bool force_enum_edp; bool forced_clocks; + bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; }; @@ -227,6 +232,7 @@ enum visual_confirm { VISUAL_CONFIRM_DISABLE = 0, VISUAL_CONFIRM_SURFACE = 1, VISUAL_CONFIRM_HDR = 2, + VISUAL_CONFIRM_MPCTREE = 4, }; enum dcc_option { @@ -245,6 +251,19 @@ enum wm_report_mode { WM_REPORT_DEFAULT = 0, WM_REPORT_OVERRIDE = 1, }; +enum dtm_pstate{ + dtm_level_p0 = 0,/*highest voltage*/ + dtm_level_p1, + dtm_level_p2, + dtm_level_p3, + dtm_level_p4,/*when active_display_count = 0*/ +}; + +enum dcn_pwr_state { + DCN_PWR_STATE_UNKNOWN = -1, + DCN_PWR_STATE_MISSION_MODE = 0, + DCN_PWR_STATE_LOW_POWER = 3, +}; /* * For any clocks that may differ per pipe @@ -252,11 +271,7 @@ enum wm_report_mode { */ struct dc_clocks { int dispclk_khz; - int max_supported_dppclk_khz; - int max_supported_dispclk_khz; int dppclk_khz; - int bw_dppclk_khz; /*a copy of dppclk_khz*/ - int bw_dispclk_khz; int dcfclk_khz; int socclk_khz; int dcfclk_deep_sleep_khz; @@ -264,12 +279,17 @@ struct dc_clocks { int phyclk_khz; int dramclk_khz; bool p_state_change_support; - + enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of * optimization required */ bool prev_p_state_change_support; + enum dtm_pstate dtm_level; + int max_supported_dppclk_khz; + int max_supported_dispclk_khz; + int bw_dppclk_khz; /*a copy of dppclk_khz*/ + int bw_dispclk_khz; }; struct dc_bw_validation_profile { @@ -345,9 +365,9 @@ struct dc_debug_options { bool disable_dfs_bypass; bool disable_dpp_power_gate; bool disable_hubp_power_gate; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_dsc_power_gate; -#endif + int dsc_min_slice_height_override; + bool native422_support; bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; unsigned int min_disp_clk_khz; @@ -382,22 +402,23 @@ struct dc_debug_options { unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_fclk_khz; bool disable_tri_buf; + bool dmub_offload_enabled; + bool dmcub_emulation; + bool dmub_command_table; /* for testing only */ struct dc_bw_validation_profile bw_val_profile; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_fec; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 bool disable_48mhz_pwrdwn; -#endif /* This forces a hard min on the DCFCLK requested to SMU/PP * watermarks are not affected. 
*/ unsigned int force_min_dcfclk_mhz; bool disable_timing_sync; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool cm_in_bypass; -#endif int force_clock_mode;/*every mode change.*/ + + bool nv12_iflip_vm_wa; + bool disable_dram_clock_change_vactive_support; + bool validate_dml_output; }; struct dc_debug_data { @@ -406,7 +427,6 @@ struct dc_debug_data { uint32_t auxErrorCount; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config { struct { uint64_t start_addr; @@ -436,7 +456,6 @@ struct dc_virtual_addr_space_config { uint32_t page_table_block_size_in_bytes; uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc. 0 = invalid }; -#endif struct dc_bounding_box_overrides { int sr_exit_time_ns; @@ -462,13 +481,9 @@ struct dc { struct dc_config config; struct dc_debug_options debug; struct dc_bounding_box_overrides bb_overrides; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa work_arounds; -#endif struct dc_context *ctx; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config vm_pa_config; -#endif uint8_t link_count; struct dc_link *links[MAX_PIPES * 2]; @@ -484,7 +499,7 @@ struct dc { /* Inputs into BW and WM calculations. */ struct bw_calcs_dceip *bw_dceip; struct bw_calcs_vbios *bw_vbios; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_soc_bounding_box *dcn_soc; struct dcn_ip_params *dcn_ip; struct display_mode_lib dml; @@ -506,10 +521,8 @@ struct dc { struct dc_debug_data debug_data; const char *build_id; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct vm_helper *vm_helper; const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; enum frame_buffer_mode { @@ -541,30 +554,36 @@ struct dc_init_data { struct dc_bios *vbios_override; enum dce_environment dce_environment; + struct dmub_offload_funcs *dmub_if; + struct dc_reg_helper_state *dmub_offload; + struct dc_config flags; uint32_t log_mask; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; -#endif }; struct dc_callback_init { +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#else uint8_t reserved; +#endif }; struct dc *dc_create(const struct dc_init_data *init_params); +void dc_hardware_init(struct dc *dc); + int dc_get_vmid_use_vector(struct dc *dc); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); /* Returns the number of vmids supported */ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config); -#endif void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params); +void dc_deinit_callbacks(struct dc *dc); void dc_destroy(struct dc **dc); /******************************************************************************* @@ -637,7 +656,6 @@ struct dc_transfer_func { }; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) union dc_3dlut_state { struct { @@ -656,12 +674,11 @@ union dc_3dlut_state { struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; - uint32_t hdr_multiplier; + struct fixed31_32 hdr_multiplier; bool initialized; /*remove after diag fix*/ union dc_3dlut_state state; struct dc_context *ctx; }; -#endif /* * This structure is filled in by dc_surface_get_status and contains * the last requested address and the currently active address so the called @@ -684,7 +701,7 @@ union surface_update_flags { uint32_t horizontal_mirror_change:1; uint32_t per_pixel_alpha_change:1; uint32_t 
global_alpha_change:1; - uint32_t sdr_white_level:1; + uint32_t hdr_mult:1; uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; @@ -712,9 +729,7 @@ union surface_update_flags { struct dc_plane_state { struct dc_plane_address address; struct dc_plane_flip_time time; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool triplebuffer_flips; -#endif struct scaling_taps scaling_quality; struct rect src_rect; struct rect dst_rect; @@ -730,18 +745,16 @@ struct dc_plane_state { struct dc_bias_and_scale *bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; - uint32_t sdr_white_level; + struct fixed31_32 hdr_mult; // TODO: No longer used, remove struct dc_hdr_static_metadata hdr_static_ctx; enum dc_color_space color_space; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_3dlut *lut3d_func; struct dc_transfer_func *in_shaper_func; struct dc_transfer_func *blend_tf; -#endif enum surface_pixel_format format; enum dc_rotation_angle rotation; @@ -777,7 +790,6 @@ struct dc_plane_info { enum dc_rotation_angle rotation; enum plane_stereo_format stereo_format; enum dc_color_space color_space; - unsigned int sdr_white_level; bool horizontal_mirror; bool visible; bool per_pixel_alpha; @@ -801,7 +813,7 @@ struct dc_surface_update { const struct dc_flip_addrs *flip_addr; const struct dc_plane_info *plane_info; const struct dc_scaling_info *scaling_info; - + struct fixed31_32 hdr_mult; /* following updates require alloc/sleep/spin that is not isr safe, * null means no updates */ @@ -810,11 +822,9 @@ struct dc_surface_update { const struct dc_csc_transform *input_csc_color_matrix; const struct fixed31_32 *coeff_reduction_factor; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) const struct dc_transfer_func *func_shaper; const struct dc_3dlut *lut3d_func; const struct dc_transfer_func *blend_tf; -#endif }; /* @@ -835,11 +845,9 @@ void dc_transfer_func_retain(struct dc_transfer_func *dc_tf); void dc_transfer_func_release(struct dc_transfer_func *dc_tf); struct dc_transfer_func *dc_create_transfer_func(void); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_3dlut *dc_create_3dlut_func(void); void dc_3dlut_func_release(struct dc_3dlut *lut); void dc_3dlut_func_retain(struct dc_3dlut *lut); -#endif /* * This structure holds a surface address. There could be multiple addresses * in cases such as Stereo 3D, Planar YUV, etc. 
Other per-flip attributes such @@ -956,10 +964,10 @@ struct dpcd_caps { bool panel_mode_edp; bool dpcd_display_control_capable; bool ext_receiver_cap_field_present; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT union dpcd_fec_capability fec_cap; struct dpcd_dsc_capabilities dsc_caps; -#endif + struct dc_lttpr_caps lttpr_caps; + }; #include "dc_link.h" @@ -980,14 +988,12 @@ struct dc_container_id { }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; struct dsc_dec_dpcd_caps dsc_dec_caps; }; -#endif /* * The sink structure contains EDID and other display device properties @@ -1002,9 +1008,7 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_sink_dsc_caps sink_dsc_caps; -#endif /* private to DC core */ struct dc_link *link; @@ -1065,10 +1069,8 @@ bool dc_is_dmcu_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /******************************************************************************* * DSC Interfaces ******************************************************************************/ #include "dc_dsc.h" -#endif #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 4ef97f65e55d..4f8f576d5fcf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -49,7 +49,8 @@ enum aux_channel_operation_result { AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN, AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY, AUX_CHANNEL_OPERATION_FAILED_TIMEOUT, - AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON + AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON, + AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c new file mode 100644 index 000000000000..59c298a6484f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_dmub_srv.h" +#include "../dmub/inc/dmub_srv.h" + +static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, + struct dmub_srv *dmub) +{ + dc_srv->dmub = dmub; + dc_srv->ctx = dc->ctx; +} + +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) +{ + struct dc_dmub_srv *dc_srv = + kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); + + if (dc_srv == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dc_dmub_srv_construct(dc_srv, dc, dmub); + + return dc_srv; +} + +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) +{ + if (*dmub_srv) { + kfree(*dmub_srv); + *dmub_srv = NULL; + } +} + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, + struct dmub_cmd_header *cmd) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_cmd_queue(dmub, cmd); + if (status == DMUB_STATUS_OK) + return; + + if (status != DMUB_STATUS_QUEUE_FULL) + goto error; + + /* Execute and wait for queue to become empty again. */ + dc_dmub_srv_cmd_execute(dc_dmub_srv); + dc_dmub_srv_wait_idle(dc_dmub_srv); + + /* Requeue the command. */ + status = dmub_srv_cmd_queue(dmub, cmd); + if (status == DMUB_STATUS_OK) + return; + +error: + DC_ERROR("Error queuing DMUB command: status=%d\n", status); +} + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_cmd_execute(dmub); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error starting DMUB execution: status=%d\n", status); +} + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + status = dmub_srv_wait_for_idle(dmub, 100000); + if (status != DMUB_STATUS_OK) + DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); +} + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub = dc_dmub_srv->dmub; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + enum dmub_status status; + + for (;;) { + /* Wait up to a second for PHY init. */ + status = dmub_srv_wait_for_phy_init(dmub, 1000000); + if (status == DMUB_STATUS_OK) + /* Initialization OK */ + break; + + DC_ERROR("DMCUB PHY init failed: status=%d\n", status); + ASSERT(0); + + if (status != DMUB_STATUS_TIMEOUT) + /* + * Server likely initialized or we don't have + * DMCUB HW support - this won't end. + */ + break; + + /* Continue spinning so we don't hang the ASIC. */ + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h new file mode 100644 index 000000000000..754b6077539c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DC_SRV_H_ +#define _DMUB_DC_SRV_H_ + +#include "os_types.h" +#include "../dmub/inc/dmub_cmd.h" + +struct dmub_srv; +struct dmub_cmd_header; + +struct dc_reg_helper_state { + bool gather_in_progress; + uint32_t same_addr_count; + bool should_burst_write; + union dmub_rb_cmd cmd_data; + unsigned int reg_seq_count; +}; + +struct dc_dmub_srv { + struct dmub_srv *dmub; + struct dc_reg_helper_state reg_helper_offload; + + struct dc_context *ctx; + void *dm; +}; + +void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, + struct dmub_cmd_header *cmd); + +void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); + +void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); + +#endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index ef79a686e4c2..dfe4472c9e40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -129,9 +129,7 @@ struct dc_link_training_overrides { bool *alternate_scrambler_reset; bool *enhanced_framing; bool *mst_enable; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool *fec_enable; -#endif }; union dpcd_rev { @@ -471,13 +469,13 @@ union training_aux_rd_interval { /* Automated test structures */ union test_request { struct { - uint8_t LINK_TRAINING :1; - uint8_t LINK_TEST_PATTRN :1; - uint8_t EDID_READ :1; - uint8_t PHY_TEST_PATTERN :1; - uint8_t AUDIO_TEST_PATTERN :1; - uint8_t RESERVED :1; - uint8_t TEST_STEREO_3D :1; + uint8_t LINK_TRAINING :1; + uint8_t LINK_TEST_PATTRN :1; + uint8_t EDID_READ :1; + uint8_t PHY_TEST_PATTERN :1; + uint8_t RESERVED :1; + uint8_t AUDIO_TEST_PATTERN :1; + uint8_t TEST_AUDIO_DISABLED_VIDEO :1; } bits; uint8_t raw; }; @@ -524,19 +522,52 @@ union link_test_pattern { union test_misc { struct dpcd_test_misc_bits { - unsigned char SYNC_CLOCK :1; + unsigned char SYNC_CLOCK :1; /* dpcd_test_color_format */ - unsigned char CLR_FORMAT :2; + unsigned char CLR_FORMAT :2; /* dpcd_test_dyn_range */ - unsigned char DYN_RANGE :1; - unsigned char YCBCR :1; + unsigned char DYN_RANGE :1; + unsigned char YCBCR_COEFS :1; /* dpcd_test_bit_depth */ - unsigned char BPC :3; + unsigned char BPC :3; } bits; unsigned char raw; }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +union audio_test_mode { + struct { + unsigned char sampling_rate :4; + 
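/* Illustrative decode (assumes the usual layout where the first-declared
 * bitfield occupies the low nibble, as on the little-endian targets this
 * driver supports): a raw TEST_AUDIO_MODE byte of 0x12 reads back as
 *
 *   union audio_test_mode mode = { .raw = 0x12 };
 *   // mode.bits.sampling_rate == 0x2, mode.bits.channel_count == 0x1
 *
 * with the DP spec mapping those nibble values to an actual sampling rate
 * and channel count. */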
unsigned char channel_count :4; + } bits; + unsigned char raw; +}; + +union audio_test_pattern_period { + struct { + unsigned char pattern_period :4; + unsigned char reserved :4; + } bits; + unsigned char raw; +}; + +struct audio_test_pattern_type { + unsigned char value; +}; + +struct dp_audio_test_data_flags { + uint8_t test_requested :1; + uint8_t disable_video :1; +}; + +struct dp_audio_test_data { + + struct dp_audio_test_data_flags flags; + uint8_t sampling_rate; + uint8_t channel_count; + uint8_t pattern_type; + uint8_t pattern_period[8]; +}; + /* FEC capability DPCD register field bits-*/ union dpcd_fec_capability { struct { @@ -661,6 +692,5 @@ struct dpcd_dsc_capabilities { union dpcd_dsc_ext_capabilities dsc_ext_caps; }; -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 6e42209f0e20..8ec09813ee17 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -1,4 +1,3 @@ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef DC_DSC_H_ #define DC_DSC_H_ /* @@ -30,6 +29,7 @@ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 #define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 +#include "dc_types.h" struct dc_dsc_bw_range { uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */ @@ -39,24 +39,42 @@ struct dc_dsc_bw_range { uint32_t stream_kbps; /* Uncompressed stream bandwidth */ }; +struct display_stream_compressor { + const struct dsc_funcs *funcs; + struct dc_context *ctx; + int inst; +}; + +struct dc_dsc_policy { + bool use_min_slices_h; + int max_slices_h; // Maximum available if 0 + int min_slice_height; // Must not be less than 8 + uint32_t max_target_bpp; + uint32_t min_target_bpp; +}; bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, - const uint32_t min_kbps, - const uint32_t max_kbps, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, + const uint32_t min_bpp, + const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); -#endif + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, + struct dc_dsc_policy *policy); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 30b2f9edd42f..02a63e9cb62f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -32,6 +32,74 @@ #include "dm_services.h" #include <stdarg.h> +#include "dc.h" +#include "dc_dmub_srv.h" + +static inline void submit_dmub_read_modify_write( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; + bool gather = false; + + offload->should_burst_write = + (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); + cmd_buf->header.payload_bytes = + sizeof(struct dmub_cmd_read_modify_write_sequence) 
* offload->reg_seq_count; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + + offload->reg_seq_count = 0; + offload->same_addr_count = 0; +} + +static inline void submit_dmub_burst_write( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + bool gather = false; + + cmd_buf->header.payload_bytes = + sizeof(uint32_t) * offload->reg_seq_count; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + + offload->reg_seq_count = 0; +} + +static inline void submit_dmub_reg_wait( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + bool gather = false; + + gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; + ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; + + dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header); + + memset(cmd_buf, 0, sizeof(*cmd_buf)); + offload->reg_seq_count = 0; + + ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; +} + struct dc_reg_value_masks { uint32_t value; uint32_t mask; @@ -77,6 +145,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask, } } +static void dmub_flush_buffer_execute( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + submit_dmub_read_modify_write(offload, ctx); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static void dmub_flush_burst_write_buffer_execute( + struct dc_reg_helper_state *offload, + const struct dc_context *ctx) +{ + submit_dmub_burst_write(offload, ctx); + dc_dmub_srv_cmd_execute(ctx->dmub_srv); +} + +static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, + uint32_t reg_val) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; + + /* flush command if buffer is full */ + if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX) + dmub_flush_burst_write_buffer_execute(offload, ctx); + + if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE && + addr != cmd_buf->addr) { + dmub_flush_burst_write_buffer_execute(offload, ctx); + return false; + } + + cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE; + cmd_buf->header.sub_type = 0; + cmd_buf->addr = addr; + cmd_buf->write_values[offload->reg_seq_count] = reg_val; + offload->reg_seq_count++; + + return true; +} + +static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr, + struct dc_reg_value_masks *field_value_mask) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; + struct dmub_cmd_read_modify_write_sequence *seq; + + /* flush command if buffer is full */ + if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE && + offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX) + 
dmub_flush_buffer_execute(offload, ctx); + + if (offload->should_burst_write) { + if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value)) + return field_value_mask->value; + else + offload->should_burst_write = false; + } + + /* pack commands */ + cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE; + cmd_buf->header.sub_type = 0; + seq = &cmd_buf->seq[offload->reg_seq_count]; + + if (offload->reg_seq_count) { + if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr) + offload->same_addr_count++; + else + offload->same_addr_count = 0; + } + + seq->addr = addr; + seq->modify_mask = field_value_mask->mask; + seq->modify_value = field_value_mask->value; + offload->reg_seq_count++; + + return field_value_mask->value; +} + +static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr, + uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us) +{ + struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload; + struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; + + cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT; + cmd_buf->header.sub_type = 0; + cmd_buf->reg_wait.addr = addr; + cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift); + cmd_buf->reg_wait.mask = mask; + cmd_buf->reg_wait.time_out_us = time_out_us; +} + uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, @@ -93,6 +255,11 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, va_end(ap); + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) + return dmub_reg_value_pack(ctx, addr, &field_value_mask); + /* todo: return void so we can decouple code running in driver from register states */ + /* mmio write directly */ reg_val = dm_read_reg(ctx, addr); reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; @@ -118,6 +285,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx, /* mmio write directly */ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + return dmub_reg_value_burst_set_pack(ctx, addr, reg_val); + /* todo: return void so we can decouple code running in driver from register states */ + } + dm_write_reg(ctx, addr, reg_val); return reg_val; } @@ -134,6 +308,14 @@ uint32_t dm_read_reg_func( return 0; } #endif + + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress && + !ctx->dmub_srv->reg_helper_offload.should_burst_write) { + ASSERT(false); + return 0; + } + value = cgs_read_register(ctx->cgs_device, address); trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); @@ -299,7 +481,19 @@ void generic_reg_wait(const struct dc_context *ctx, uint32_t reg_val; int i; - /* something is terribly wrong if time out is > 200ms. (5Hz) */ + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value, + delay_between_poll_us * time_out_num_tries); + return; + } + + /* + * Something is terribly wrong if time out is > 3000ms. + * 3000ms is the maximum time needed for SMU to pass values back. + * This value comes from experiments. 
+ * + */ ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000); for (i = 0; i <= time_out_num_tries; i++) { @@ -346,6 +540,12 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx, { uint32_t value = 0; + // when a reg is read, there should not be any offload in progress. + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress) { + ASSERT(false); + } + dm_write_reg(ctx, addr_index, index); value = dm_read_reg(ctx, addr_data); @@ -382,3 +582,68 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, return reg_val; } + +void reg_sequence_start_gather(const struct dc_context *ctx) +{ + /* if reg sequence is supported and enabled, set flag to + * indicate we want to have REG_SET, REG_UPDATE macro build + * reg sequence command buffer rather than MMIO directly. + */ + + if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) { + struct dc_reg_helper_state *offload = + &ctx->dmub_srv->reg_helper_offload; + + /* caller sequence mismatch. need to debug caller. offload will not work!!! */ + ASSERT(!offload->gather_in_progress); + + offload->gather_in_progress = true; + } +} + +void reg_sequence_start_execute(const struct dc_context *ctx) +{ + struct dc_reg_helper_state *offload; + + if (!ctx->dmub_srv) + return; + + offload = &ctx->dmub_srv->reg_helper_offload; + + if (offload && offload->gather_in_progress) { + offload->gather_in_progress = false; + offload->should_burst_write = false; + switch (offload->cmd_data.cmd_common.header.type) { + case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE: + submit_dmub_read_modify_write(offload, ctx); + break; + case DMUB_CMD__REG_REG_WAIT: + submit_dmub_reg_wait(offload, ctx); + break; + case DMUB_CMD__REG_SEQ_BURST_WRITE: + submit_dmub_burst_write(offload, ctx); + break; + default: + return; + } + + dc_dmub_srv_cmd_execute(ctx->dmub_srv); + } +} + +void reg_sequence_wait_done(const struct dc_context *ctx) +{ + /* callback to DM to poll for last submission done */ + struct dc_reg_helper_state *offload; + + if (!ctx->dmub_srv) + return; + + offload = &ctx->dmub_srv->reg_helper_offload; + + if (offload && + ctx->dc->debug.dmub_offload_enabled && + !ctx->dc->debug.dmcub_emulation) { + dc_dmub_srv_wait_idle(ctx->dmub_srv); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 0b8700a8a94a..25c50bcab9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -124,20 +124,6 @@ struct plane_size { int chroma_pitch; struct rect surface_size; struct rect chroma_size; - - union { - struct { - struct rect surface_size; - int surface_pitch; - } grph; - - struct { - struct rect luma_size; - int luma_pitch; - struct rect chroma_size; - int chroma_pitch; - } video; - }; }; struct dc_plane_dcc_param { @@ -148,21 +134,6 @@ struct dc_plane_dcc_param { int meta_pitch_c; bool independent_64b_blks_c; - - union { - struct { - int meta_pitch; - bool independent_64b_blks; - } grph; - - struct { - int meta_pitch_l; - bool independent_64b_blks_l; - - int meta_pitch_c; - bool independent_64b_blks_c; - } video; - }; }; /*Displayable pixel format in fb*/ @@ -194,12 +165,10 @@ enum surface_pixel_format { /* swapped & float */ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F, /*grow graphics here if necessary */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX, SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX, SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT, SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT, -#endif
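For reference, a minimal sketch (the caller name and the elided REG_* calls are hypothetical, not part of the patch) of how the reg_sequence_* helpers added to dc_helper.c above are intended to be driven: gather register traffic into a DMUB command buffer instead of issuing MMIO, submit it, then wait for the firmware to drain it.

static void program_block_via_dmub(const struct dc_context *ctx)
{
	reg_sequence_start_gather(ctx);   /* REG_SET/REG_UPDATE now pack commands */

	/* ... REG_SET()/REG_UPDATE()/REG_WAIT() calls for the block ... */

	reg_sequence_start_execute(ctx);  /* flush the pending command to DMUB */
	reg_sequence_wait_done(ctx);      /* poll until the firmware is idle */
}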
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr = SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, @@ -207,10 +176,8 @@ enum surface_pixel_format { SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010, SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102, -#endif SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_INVALID @@ -249,12 +216,10 @@ enum tile_split_values { DC_ROTATED_MICRO_TILING = 0x3, }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum tripleBuffer_enable { DC_TRIPLEBUFFER_DISABLE = 0x0, DC_TRIPLEBUFFER_ENABLE = 0x1, }; -#endif /* TODO: These values come from hardware spec. We need to readdress this * if they ever change. @@ -454,13 +419,11 @@ struct dc_csc_transform { bool enable_adjustment; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_rgb_fixed { struct fixed31_32 red; struct fixed31_32 green; struct fixed31_32 blue; }; -#endif struct dc_gamma { struct kref refcount; @@ -495,10 +458,8 @@ enum dc_cursor_color_format { CURSOR_MODE_COLOR_1BIT_AND, CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA, CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED, CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED -#endif }; /* @@ -605,6 +566,11 @@ enum dc_quantization_range { QUANTIZATION_RANGE_LIMITED }; +enum dc_dynamic_expansion { + DYN_EXPANSION_AUTO, + DYN_EXPANSION_DISABLE +}; + /* XFM */ /* used in struct dc_plane_state */ @@ -646,10 +612,8 @@ enum dc_color_depth { COLOR_DEPTH_121212, COLOR_DEPTH_141414, COLOR_DEPTH_161616, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 COLOR_DEPTH_999, COLOR_DEPTH_111111, -#endif COLOR_DEPTH_COUNT }; @@ -710,9 +674,7 @@ struct dc_crtc_timing_flags { * rates less than or equal to 340Mcsc */ uint32_t LTE_340MCSC_SCRAMBLE:1; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT uint32_t DSC : 1; /* Use DSC with this timing */ -#endif }; enum dc_timing_3d_format { @@ -737,31 +699,6 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; -enum trigger_delay { - TRIGGER_DELAY_NEXT_PIXEL = 0, - TRIGGER_DELAY_NEXT_LINE, -}; - -enum crtc_event { - CRTC_EVENT_VSYNC_RISING = 0, - CRTC_EVENT_VSYNC_FALLING -}; - -struct crtc_trigger_info { - bool enabled; - struct dc_stream_state *event_source; - enum crtc_event event; - enum trigger_delay delay; -}; - -struct dc_crtc_timing_adjust { - uint32_t v_total_min; - uint32_t v_total_max; - uint32_t v_total_mid; - uint32_t v_total_mid_frame_num; -}; - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ uint32_t num_slices_v; /* Number of DSC slices - vertical */ @@ -772,7 +709,6 @@ struct dc_dsc_config { bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. 
*/ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ }; -#endif struct dc_crtc_timing { uint32_t h_total; uint32_t h_border_left; @@ -799,11 +735,34 @@ struct dc_crtc_timing { enum scanning_type scan_type; struct dc_crtc_timing_flags flags; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config dsc_cfg; -#endif }; +enum trigger_delay { + TRIGGER_DELAY_NEXT_PIXEL = 0, + TRIGGER_DELAY_NEXT_LINE, +}; + +enum crtc_event { + CRTC_EVENT_VSYNC_RISING = 0, + CRTC_EVENT_VSYNC_FALLING +}; + +struct crtc_trigger_info { + bool enabled; + struct dc_stream_state *event_source; + enum crtc_event event; + enum trigger_delay delay; +}; + +struct dc_crtc_timing_adjust { + uint32_t v_total_min; + uint32_t v_total_max; + uint32_t v_total_mid; + uint32_t v_total_mid_frame_num; +}; + + /* Passed on init */ enum vram_type { VIDEO_MEMORY_TYPE_GDDR5 = 2, @@ -813,7 +772,6 @@ enum vram_type { VIDEO_MEMORY_TYPE_GDDR6 = 6, }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dwb_cnv_out_bpc { DWB_CNV_OUT_BPC_8BPC = 0, DWB_CNV_OUT_BPC_10BPC = 1, @@ -864,7 +822,6 @@ struct mcif_buf_params { unsigned int swlock; }; -#endif #define MAX_TG_COLOR_VALUE 0x3FF struct tg_color { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 9ea75db3484e..1ff79f703734 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -29,13 +29,11 @@ #include "dc_types.h" #include "grph_object_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum dc_link_fec_state { dc_link_fec_not_ready, dc_link_fec_ready, dc_link_fec_enabled }; -#endif struct dc_link_status { bool link_active; struct dpcd_caps *dpcd_caps; @@ -85,6 +83,7 @@ struct dc_link { bool link_state_valid; bool aux_access_disabled; bool sync_lt_in_progress; + bool is_lttpr_mode_transparent; /* caps is the same as reported_link_cap. link_training uses * reported_link_cap. Will clean up. TODO */ @@ -95,6 +94,7 @@ struct dc_link { struct dc_lane_settings cur_lane_setting; struct dc_link_settings preferred_link_setting; struct dc_link_training_overrides preferred_training_settings; + struct dp_audio_test_data audio_test_data; uint8_t ddc_hw_inst; @@ -126,7 +126,8 @@ struct dc_link { unsigned short chip_caps; unsigned int dpcd_sink_count; enum edp_revision edp_revision; - bool psr_enabled; + bool psr_feature_enabled; + bool psr_allow_active; /* MST record stream using this link */ struct link_flags { @@ -139,9 +140,7 @@ struct dc_link { struct link_trace link_trace; struct gpio *hpd_gpio; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum dc_link_fec_state fec_state; -#endif }; const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); @@ -158,6 +157,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ return dc->links[link_index]; } +static inline struct dc_link *get_edp_link(const struct dc *dc) +{ + int i; + + // report any eDP links, even unconnected DDI's + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) + return dc->links[i]; + } + return NULL; +} + /* Set backlight level of an embedded panel (eDP, LVDS). * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer * and 16 bit fractional, where 1.0 is max backlight value.
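The comment above defines backlight_pwm_u16_16 as unsigned 16.16 fixed point with 1.0 as the maximum backlight value. A minimal sketch of the conversion, assuming a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper, not part of the patch: percent -> unsigned 16.16,
 * where 100% maps to 1.0 == 0x00010000. The 64-bit intermediate avoids
 * overflow for any 32-bit input.
 */
static inline uint32_t backlight_pct_to_u16_16(uint32_t pct)
{
	return (uint32_t)(((uint64_t)pct << 16) / 100);
}

For example, 50% yields 0x00008000 and 100% yields 0x00010000, the "1.0 is max" value the comment describes.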
@@ -170,7 +181,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link); bool dc_link_set_abm_disable(const struct dc_link *dc_link); -bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); +bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait); bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state); @@ -192,6 +203,7 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). * Return: @@ -245,6 +257,7 @@ void dc_link_dp_disable_hpd(const struct dc_link *link); bool dc_link_dp_set_test_pattern( struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); @@ -276,6 +289,7 @@ void dc_link_enable_hpd(const struct dc_link *link); void dc_link_disable_hpd(const struct dc_link *link); void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); @@ -291,6 +305,10 @@ bool dc_submit_i2c( uint32_t link_index, struct i2c_command *cmd); +bool dc_submit_i2c_oem( + struct dc *dc, + struct i2c_command *cmd); + uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 0fa1c26bc20d..3ea54321b045 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -52,7 +52,6 @@ struct freesync_context { bool dummy; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum hubp_dmdata_mode { DMDATA_SW_MODE, DMDATA_HW_MODE @@ -82,9 +81,7 @@ struct dc_dmdata_attributes { /* An unbounded array of uint32s, represents software dmdata to be loaded */ uint32_t *dmdata_sw_data; }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_writeback_info { bool wb_enabled; int dwb_pipe_inst; @@ -96,7 +93,6 @@ struct dc_writeback_update { unsigned int num_wb_info; struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; }; -#endif enum vertical_interrupt_ref_point { START_V_UPDATE = 0, @@ -113,6 +109,19 @@ struct periodic_interrupt_config { int lines_offset; }; +union stream_update_flags { + struct { + uint32_t scaling:1; + uint32_t out_tf:1; + uint32_t out_csc:1; + uint32_t abm_level:1; + uint32_t dpms_off:1; + uint32_t gamut_remap:1; + uint32_t wb_update:1; + } bits; + + uint32_t raw; +}; struct dc_stream_state { // sink is deprecated, new code should not reference @@ -149,6 +158,7 @@ struct dc_stream_state { enum view_3d_format view_format; + bool use_vsc_sdp_for_colorimetry; bool ignore_msa_timing_param; bool converter_disable_audio; uint8_t qs_bit; @@ -188,11 +198,9 @@ struct dc_stream_state { struct crtc_trigger_info triggered_crtc_reset; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* writeback */ unsigned int num_wb_info; struct dc_writeback_info writeback_info[MAX_DWB_PIPES]; -#endif /* Computed state bits */ bool mode_changed : 1; @@ -211,12 +219,15 @@ struct dc_stream_state { bool apply_seamless_boot_optimization; uint32_t 
stream_id; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool is_dsc_enabled; -#endif + union stream_update_flags update_flags; }; +#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF + struct dc_stream_update { + struct dc_stream_state *stream; + struct rect src; struct rect dst; struct dc_transfer_func *out_transfer_func; @@ -231,6 +242,7 @@ struct dc_stream_update { struct dc_info_packet *vsp_infopacket; bool *dpms_off; + bool integer_scaling_update; struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; @@ -238,12 +250,8 @@ struct dc_stream_update { struct dc_csc_transform *output_csc_transform; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_writeback_update *wb_update; -#endif -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) struct dc_dsc_config *dsc_config; -#endif }; bool dc_is_stream_unchanged( @@ -333,7 +341,6 @@ bool dc_add_all_planes_for_stream( int plane_count, struct dc_state *context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); @@ -344,7 +351,6 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) bool dc_stream_set_dynamic_metadata(struct dc *dc, struct dc_stream_state *stream, struct dc_dmdata_attributes *dmdata_attr); -#endif enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); @@ -431,6 +437,9 @@ void dc_stream_set_static_screen_events(struct dc *dc, int num_streams, const struct dc_static_screen_events *events); +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option); + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b273735b6a3e..2b92bfa28bde 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,6 +25,10 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ +/* AND EdidUtility only needs a portion + * of this file, including the rest only + * causes additional issues. 
+ */ #include "os_types.h" #include "fixed31_32.h" #include "irq_types.h" @@ -33,12 +37,17 @@ #include "dal_types.h" #include "grph_object_defs.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif + /* forward declarations */ struct dc_plane_state; struct dc_stream_state; struct dc_link; struct dc_sink; struct dal; +struct dc_dmub_srv; /******************************** * Environment definitions @@ -100,6 +109,11 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; + struct dc_dmub_srv *dmub_srv; + +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#endif }; @@ -107,6 +121,7 @@ struct dc_context { #define DC_EDID_BLOCK_SIZE 128 #define MAX_SURFACE_NUM 4 #define NUM_PIXEL_FORMATS 10 +#define MAX_REPEATER_CNT 8 #include "dc_ddc_types.h" @@ -159,6 +174,12 @@ enum dc_edid_status { EDID_THE_SAME, }; +enum act_return_status { + ACT_SUCCESS, + ACT_LINK_LOST, + ACT_FAILED +}; + /* audio capability from EDID*/ struct dc_cea_audio_mode { uint8_t format_code; /* ucData[0] [6:3]*/ @@ -384,6 +405,30 @@ enum dpcd_downstream_port_max_bpc { DOWN_STREAM_MAX_12BPC, DOWN_STREAM_MAX_16BPC }; + + +enum link_training_offset { + DPRX = 0, + LTTPR_PHY_REPEATER1 = 1, + LTTPR_PHY_REPEATER2 = 2, + LTTPR_PHY_REPEATER3 = 3, + LTTPR_PHY_REPEATER4 = 4, + LTTPR_PHY_REPEATER5 = 5, + LTTPR_PHY_REPEATER6 = 6, + LTTPR_PHY_REPEATER7 = 7, + LTTPR_PHY_REPEATER8 = 8 +}; + +struct dc_lttpr_caps { + union dpcd_rev revision; + uint8_t mode; + uint8_t max_lane_count; + uint8_t max_link_rate; + uint8_t phy_repeater_cnt; + uint8_t max_ext_timeout; + uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; +}; + struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ enum display_dongle_type dongle_type; @@ -422,7 +467,6 @@ enum display_content_type { DISPLAY_CONTENT_TYPE_GAME = 8 }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* writeback */ struct dwb_stereo_params { bool stereo_enabled; /* false: normal mode, true: 3D stereo */ @@ -453,7 +497,6 @@ struct dc_dwb_params { enum dwb_subsample_position subsample_position; struct dc_transfer_func *out_transfer_func; }; -#endif /* audio*/ @@ -561,9 +604,7 @@ enum dc_infoframe_type { DC_HDMI_INFOFRAME_TYPE_AVI = 0x82, DC_HDMI_INFOFRAME_TYPE_SPD = 0x83, DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_DP_INFOFRAME_TYPE_PPS = 0x10, -#endif }; struct dc_info_packet { @@ -739,7 +780,6 @@ struct dc_clock_config { uint32_t current_clock_khz;/*current clock in use*/ }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { struct { @@ -809,5 +849,5 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; }; -#endif + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 58bd131d5b48..b8a3fc505c9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id) /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + 1, 80000); + return true; } @@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm) /* Enable the backlight output */ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); + /* Disable fractional pwm if configured */ + REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, + 
abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1); + /* Unlock group 2 backlight registers */ REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); @@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DCE_ABM(*abm); - if (abm_dce->base.dmcu_is_running == true) - abm_dce->base.funcs->set_abm_immediate_disable(*abm); - kfree(abm_dce); *abm = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index 7ba7e6f722f6..ba0caaffa24b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -67,7 +67,6 @@ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ABM_DCN20_REG_LIST() \ ABM_COMMON_REG_LIST_DCE_BASE(), \ SR(DC_ABM1_HG_SAMPLE_RATE), \ @@ -81,7 +80,6 @@ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \ NBIO_SR(BIOS_SCRATCH_2) -#endif #define ABM_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -163,9 +161,7 @@ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh) -#endif #define ABM_REG_FIELD_LIST(type) \ type ABM1_HG_NUM_OF_BINS_SEL; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index c3f9f4185ce8..f1a5d2c6aa37 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,6 +42,10 @@ #include "reg_helper.h" +#undef FN +#define FN(reg_name, field_name) \ + aux110->shift->field_name, aux110->mask->field_name + #define FROM_AUX_ENGINE(ptr) \ container_of((ptr), struct aux_engine_dce110, base) @@ -55,6 +59,16 @@ enum { AUX_TIMED_OUT_RETRY_COUNTER = 2, AUX_DEFER_RETRY_COUNTER = 6 }; + +#define TIME_OUT_INCREMENT 1016 +#define TIME_OUT_MULTIPLIER_8 8 +#define TIME_OUT_MULTIPLIER_16 16 +#define TIME_OUT_MULTIPLIER_32 32 +#define TIME_OUT_MULTIPLIER_64 64 +#define MAX_TIMEOUT_LENGTH 127 +#define DEFAULT_AUX_ENGINE_MULT 0 +#define DEFAULT_AUX_ENGINE_LENGTH 69 + static void release_engine( struct dce_aux *engine) { @@ -198,7 +212,7 @@ static void submit_channel_request( REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); /* set the delay and the number of bytes to write */ @@ -327,7 +341,7 @@ static enum aux_channel_operation_result get_channel_status( /* poll to make sure that SW_DONE is asserted */ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); value = REG_READ(AUX_SW_STATUS); /* in case HPD is LOW, exit AUX transaction */ @@ -414,20 +428,103 @@ void dce110_engine_destroy(struct dce_aux **engine) *engine = NULL; } + +static uint32_t dce_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout_in_us) +{ + uint32_t multiplier = 0; + uint32_t length = 0; + uint32_t prev_length = 0; + uint32_t prev_mult = 0; + uint32_t prev_timeout_val = 0; + struct ddc *ddc_pin = ddc->ddc_pin; + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + + /* 1-Update polling timeout period */ + aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; + 
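	/* Worked example for the encoding computed below (aside, not in the
	 * original hunk): the receiver timeout is approximated as
	 * AUX_RX_TIMEOUT_LEN * multiplier, where the bucket selects
	 * AUX_RX_TIMEOUT_LEN_MUL (units of 8, 16, 32 or 64) and the rounded-up
	 * quotient is capped at MAX_TIMEOUT_LENGTH (127). E.g. timeout_in_us =
	 * 3200 lands in the (2 * 1016, 4 * 1016] bucket, so multiplier = 2
	 * (units of 32) and length = 3200 / 32 = 100.
	 */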
+ /* 2-Update aux timeout period length and multiplier */ + if (timeout_in_us == 0) { + multiplier = DEFAULT_AUX_ENGINE_MULT; + length = DEFAULT_AUX_ENGINE_LENGTH; + } else if (timeout_in_us <= TIME_OUT_INCREMENT) { + multiplier = 0; + length = timeout_in_us/TIME_OUT_MULTIPLIER_8; + if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) + length++; + } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) { + multiplier = 1; + length = timeout_in_us/TIME_OUT_MULTIPLIER_16; + if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0) + length++; + } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) { + multiplier = 2; + length = timeout_in_us/TIME_OUT_MULTIPLIER_32; + if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0) + length++; + } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) { + multiplier = 3; + length = timeout_in_us/TIME_OUT_MULTIPLIER_64; + if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0) + length++; + } + + length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH; + + REG_GET_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, &prev_length, AUX_RX_TIMEOUT_LEN_MUL, &prev_mult); + + switch (prev_mult) { + case 0: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_8; + break; + case 1: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_16; + break; + case 2: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_32; + break; + case 3: + prev_timeout_val = prev_length * TIME_OUT_MULTIPLIER_64; + break; + default: + prev_timeout_val = DEFAULT_AUX_ENGINE_LENGTH * TIME_OUT_MULTIPLIER_8; + break; + } + + REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); + + return prev_timeout_val; +} + +static struct dce_aux_funcs aux_functions = { + .configure_timeout = NULL, + .destroy = NULL, +}; + struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs) + const struct dce110_aux_registers *regs, + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable) { aux_engine110->base.ddc = NULL; aux_engine110->base.ctx = ctx; aux_engine110->base.delay = 0; aux_engine110->base.max_defer_write_retry = 0; aux_engine110->base.inst = inst; - aux_engine110->timeout_period = timeout_period; + aux_engine110->polling_timeout_period = timeout_period; aux_engine110->regs = regs; + aux_engine110->mask = mask; + aux_engine110->shift = shift; + aux_engine110->base.funcs = &aux_functions; + if (is_ext_aux_timeout_configurable) + aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout; + return &aux_engine110->base; } @@ -464,8 +561,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, memset(&aux_rep, 0, sizeof(aux_rep)); aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - if (!acquire(aux_engine, ddc_pin)) + if (!acquire(aux_engine, ddc_pin)) { + *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE; return -1; + } if (payload->i2c_over_aux) aux_req.type = AUX_TRANSACTION_TYPE_I2C; @@ -475,7 +574,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, aux_req.action = i2caux_action_from_payload(payload); aux_req.address = payload->address; - aux_req.delay = payload->defer_delay * 10; + aux_req.delay = 0; aux_req.length = payload->length; aux_req.data = payload->data; @@ -512,6 +611,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, uint8_t reply; bool payload_reply = true; enum aux_channel_operation_result 
operation_result; + bool retry_on_defer = false; + int aux_ack_retries = 0, aux_defer_retries = 0, aux_i2c_defer_retries = 0, @@ -542,10 +643,19 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_TRANSACTION_REPLY_AUX_DEFER: - case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + retry_on_defer = true; + /* fall through */ + case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; + } else { + if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || + (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { + if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } + } break; case AUX_TRANSACTION_REPLY_I2C_DEFER: @@ -569,19 +679,29 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) - goto fail; - else { - /* - * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts - * According to the DP spec there should be 3 retries total - * with a 400us wait in between each. Hardware already waits - * for 550us therefore no wait is required here. - */ + // Check whether a DEFER had occurred before the timeout. + // If so, treat timeout as a DEFER. + if (retry_on_defer) { + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + goto fail; + else if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } else { + if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES) + goto fail; + else { + /* + * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts + * According to the DP spec there should be 3 retries total + * with a 400us wait in between each. Hardware already waits + * for 550us therefore no wait is required here.
+ */ + } } break; case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: default: goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index ed7fec8fe253..382465862f29 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -29,15 +29,15 @@ #include "i2caux_interface.h" #include "inc/hw/aux_engine.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 + #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ SRI(AUX_ARB_CONTROL, DP_AUX, id), \ SRI(AUX_SW_DATA, DP_AUX, id), \ SRI(AUX_SW_CONTROL, DP_AUX, id), \ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ SRI(AUX_SW_STATUS, DP_AUX, id) -#endif #define AUX_COMMON_REG_LIST(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -55,6 +55,7 @@ struct dce110_aux_registers { uint32_t AUX_SW_DATA; uint32_t AUX_SW_CONTROL; uint32_t AUX_INTERRUPT_CONTROL; + uint32_t AUX_DPHY_RX_CONTROL1; uint32_t AUX_SW_STATUS; uint32_t AUXN_IMPCAL; uint32_t AUXP_IMPCAL; @@ -62,6 +63,156 @@ struct dce110_aux_registers { uint32_t AUX_RESET_MASK; }; +#define DCE_AUX_REG_FIELD_LIST(type)\ + type AUX_EN;\ + type AUX_RESET;\ + type AUX_RESET_DONE;\ + type AUX_REG_RW_CNTL_STATUS;\ + type AUX_SW_USE_AUX_REG_REQ;\ + type AUX_SW_DONE_USING_AUX_REG;\ + type AUX_SW_AUTOINCREMENT_DISABLE;\ + type AUX_SW_DATA_RW;\ + type AUX_SW_INDEX;\ + type AUX_SW_GO;\ + type AUX_SW_DATA;\ + type AUX_SW_REPLY_BYTE_COUNT;\ + type AUX_SW_DONE;\ + type AUX_SW_DONE_ACK;\ + type AUXN_IMPCAL_ENABLE;\ + type AUXP_IMPCAL_ENABLE;\ + type AUXN_IMPCAL_OVERRIDE_ENABLE;\ + type AUXP_IMPCAL_OVERRIDE_ENABLE;\ + type AUX_RX_TIMEOUT_LEN;\ + type AUX_RX_TIMEOUT_LEN_MUL;\ + type AUXN_CALOUT_ERROR_AK;\ + type AUXP_CALOUT_ERROR_AK;\ + type AUX_SW_START_DELAY;\ + type AUX_SW_WR_BYTES + +#define DCE10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + 
AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE12_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* DCN10 MASK */ +#define DCN10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, 
AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* for all other DCN */ +#define DCN_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh) + +#define AUX_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + enum { /* This is the timeout as defined in DP 1.2a, * 2.3.4 "Detailed uPacket TX AUX CH State Description". */ @@ -97,20 +248,34 @@ struct dce_aux { uint32_t max_defer_write_retry; bool acquire_reset; + struct dce_aux_funcs *funcs; }; +struct dce110_aux_registers_mask { + DCE_AUX_REG_FIELD_LIST(uint32_t); +}; + +struct dce110_aux_registers_shift { + DCE_AUX_REG_FIELD_LIST(uint8_t); +}; + + struct aux_engine_dce110 { struct dce_aux base; const struct dce110_aux_registers *regs; + const struct dce110_aux_registers_mask *mask; + const struct dce110_aux_registers_shift *shift; struct { uint32_t aux_control; uint32_t aux_arb_control; uint32_t aux_sw_data; uint32_t aux_sw_control; uint32_t aux_interrupt_control; + uint32_t aux_dphy_rx_control1; + uint32_t aux_dphy_rx_control0; uint32_t aux_sw_status; } addr; - uint32_t timeout_period; + uint32_t polling_timeout_period; }; struct aux_engine_dce110_init_data { @@ -120,12 +285,15 @@ struct aux_engine_dce110_init_data { const struct dce110_aux_registers *regs; }; -struct dce_aux *dce110_aux_engine_construct( - struct aux_engine_dce110 *aux_engine110, +struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs); + const struct dce110_aux_registers *regs, + + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable); void dce110_engine_destroy(struct dce_aux **engine); @@ -139,4 +307,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); + +struct dce_aux_funcs { + uint32_t (*configure_timeout) + (struct ddc_service *ddc, + uint32_t timeout); + void (*destroy) + (struct aux_engine **ptr); +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index f787a6b94781..2e992fbc0d71 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -905,7 +905,7 @@ static bool dce112_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; unsigned dp_dto_ref_100hz = 7000000; @@ -1004,7 +1004,6 @@ static bool get_pixel_clk_frequency_100hz( return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */ struct pixel_rate_range_table_entry { @@ -1064,7 +1063,6 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = { .get_pix_clk_dividers = dce112_get_pix_clk_dividers, .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; -#endif /*****************************************/ /* Constructor */ @@ -1435,7 +1433,6 @@ bool dce112_clk_src_construct( return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, @@ -1451,4 +1448,3 @@ bool dcn20_clk_src_construct( return ret; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 43c1bf60b83c..51bd25079606 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -55,7 +55,6 @@ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define CS_COMMON_REG_LIST_DCN2_0(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ @@ -76,9 +75,7 @@ SRII(PIXEL_RATE_CNTL, OTG, 3),\ SRII(PIXEL_RATE_CNTL, OTG, 4),\ SRII(PIXEL_RATE_CNTL, OTG, 5) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ @@ -93,17 +90,14 @@ SRII(PIXEL_RATE_CNTL, OTG, 1),\ SRII(PIXEL_RATE_CNTL, OTG, 2),\ SRII(PIXEL_RATE_CNTL, OTG, 3) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\ CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ @@ -201,7 +195,6 @@ bool dce112_clk_src_construct( const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dcn20_clk_src_construct( struct dce110_clk_src *clk_src, struct dc_context *ctx, @@ -210,6 +203,5 @@ bool dcn20_clk_src_construct( const struct dce110_clk_src_regs *regs, const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); -#endif #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 0b86cee4876f..e619e67e6b51 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -59,6 +59,12 @@ #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L +// PSP FW version +#define mmMP0_SMN_C2PMSG_58 0x1607A + +// Register access policy version +#define mmMP0_SMN_C2PMSG_91 0x1609B + static bool dce_dmcu_init(struct dmcu *dmcu) { // Do nothing @@ -318,7 +324,7 @@ static void dce_get_psr_wait_loop( return; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static void dcn10_get_dmcu_version(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); @@ -373,6 +379,7 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; + PERF_TRACE(); /* Definition of DC_DMCU_SCRATCH * 0 : firmware not loaded * 1 : PSP loaded DMCU FW but not initialized @@ -429,9 +436,21 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) break; } + PERF_TRACE(); return status; } +static bool dcn21_dmcu_init(struct dmcu *dmcu) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + uint32_t dmcub_psp_version = REG_READ(DMCUB_SCRATCH15); + + if (dmcu->auto_load_dmcu && dmcub_psp_version == 0) { + return false; + } + + return dcn10_dmcu_init(dmcu); +} static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, @@ -727,9 +746,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN1_0) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static bool dcn20_lock_phy(struct dmcu *dmcu) { @@ -777,7 +794,7 @@ static bool dcn20_unlock_phy(struct dmcu *dmcu) return true; } -#endif //(CONFIG_DRM_AMD_DC_DCN2_0) +#endif //(CONFIG_DRM_AMD_DC_DCN) static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, @@ -790,7 +807,7 @@ static const struct dmcu_funcs dce_funcs = { .is_dmcu_initialized = dce_is_dmcu_initialized }; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) static const struct dmcu_funcs dcn10_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -801,9 +818,7 @@ static const struct dmcu_funcs dcn10_funcs = { .get_psr_wait_loop = dcn10_get_psr_wait_loop, .is_dmcu_initialized = dcn10_is_dmcu_initialized }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static const struct dmcu_funcs dcn20_funcs = { .dmcu_init = dcn10_dmcu_init, .load_iram = dcn10_dmcu_load_iram, @@ -816,6 +831,19 @@ static const struct dmcu_funcs dcn20_funcs = { .lock_phy = dcn20_lock_phy, .unlock_phy = dcn20_unlock_phy }; + +static const struct dmcu_funcs dcn21_funcs = { + .dmcu_init = dcn21_dmcu_init, + .load_iram = dcn10_dmcu_load_iram, + .set_psr_enable = dcn10_dmcu_set_psr_enable, + .setup_psr = dcn10_dmcu_setup_psr, + .get_psr_state = dcn10_get_dmcu_psr_state, + .set_psr_wait_loop = dcn10_psr_wait_loop, + .get_psr_wait_loop = dcn10_get_psr_wait_loop, + .is_dmcu_initialized = dcn10_is_dmcu_initialized, + .lock_phy = dcn20_lock_phy, + .unlock_phy = dcn20_unlock_phy +}; #endif static void dce_dmcu_construct( @@ -836,6 +864,26 @@ static void dce_dmcu_construct( dmcu_dce->dmcu_mask = dmcu_mask; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dcn21_dmcu_construct( + struct dce_dmcu *dmcu_dce, + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask) +{ + uint32_t psp_version = 0; + + dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+ + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { + psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58); + dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029); + dmcu_dce->base.psp_version = psp_version; + } +} +#endif + struct dmcu *dce_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -857,7 +905,7 @@ struct dmcu *dce_dmcu_create( return &dmcu_dce->base; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dmcu *dcn10_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -878,9 +926,7 @@ struct dmcu *dcn10_dmcu_create( return &dmcu_dce->base; } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, @@ -901,15 +947,33 @@ struct dmcu *dcn20_dmcu_create( return &dmcu_dce->base; } + +struct dmcu *dcn21_dmcu_create( + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask) +{ + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); + + if (dmcu_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn21_dmcu_construct( + dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask); + + dmcu_dce->base.funcs = &dcn21_funcs; + + return &dmcu_dce->base; +} #endif void dce_dmcu_destroy(struct dmcu **dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu); - if (dmcu_dce->base.dmcu_state == DMCU_RUNNING) - dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true); - kfree(dmcu_dce); *dmcu = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index cc8587683b4b..5e044c2d3d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -71,6 +71,10 @@ DMCU_COMMON_REG_LIST_DCE_BASE(), \ SR(DMU_MEM_PWR_CNTL) +#define DMCU_DCN20_REG_LIST()\ + DMCU_DCN10_REG_LIST(), \ + SR(DMCUB_SCRATCH15) + #define DMCU_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -175,6 +179,7 @@ struct dce_dmcu_registers { uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK; uint32_t SMU_INTERRUPT_CONTROL; uint32_t DC_DMCU_SCRATCH; + uint32_t DMCUB_SCRATCH15; }; struct dce_dmcu { @@ -261,13 +266,17 @@ struct dmcu *dcn10_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dmcu *dcn20_dmcu_create( struct dc_context *ctx, const struct dce_dmcu_registers *regs, const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask); -#endif + +struct dmcu *dcn21_dmcu_create( + struct dc_context *ctx, + const struct dce_dmcu_registers *regs, + const struct dce_dmcu_shift *dmcu_shift, + const struct dce_dmcu_mask *dmcu_mask); void dce_dmcu_destroy(struct dmcu **dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c index 0275d6d60da4..e1c5839a80dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c @@ -25,7 +25,7 @@ #include "dce_hwseq.h" #include "reg_helper.h" -#include "hw_sequencer.h" +#include "hw_sequencer_private.h" #include "core_types.h" #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index ac04d77058f0..c5aa1f48593a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -25,7 +25,7 @@ #ifndef __DCE_HWSEQ_H__ #define __DCE_HWSEQ_H__ -#include "hw_sequencer.h" +#include "dc_types.h" #define BL_REG_LIST()\ SR(LVTMA_PWRSEQ_CNTL), \ @@ -210,7 +210,6 @@ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define HWSEQ_DCN2_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -276,9 +275,7 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_REG_LIST()\ HWSEQ_DCN_REG_LIST(), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ @@ -329,7 +326,6 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ BL_REG_LIST() -#endif struct dce_hwseq_registers { @@ -577,7 +573,6 @@ struct dce_hwseq_registers { HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -637,9 +632,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -679,9 +672,9 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) -#endif #define HWSEQ_REG_FIELD_LIST(type) \ type DCFE_CLOCK_ENABLE; \ @@ -799,8 +792,7 @@ struct dce_hwseq_registers { type D2VGA_MODE_ENABLE; \ type D3VGA_MODE_ENABLE; \ type D4VGA_MODE_ENABLE; \ - type AZALIA_AUDIO_DTO_MODULE;\ - type HPO_HDMISTREAMCLK_GATE_DIS; + type AZALIA_AUDIO_DTO_MODULE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) @@ -819,6 +811,10 @@ enum blnd_mode { BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */ }; +struct dce_hwseq; +struct pipe_ctx; +struct clock_source; + void dce_enable_fe_clock(struct dce_hwseq *hwss, unsigned int inst, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c index 35a75398fcb4..dd41736bb5c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c @@ -31,7 +31,7 @@ bool dce_i2c_submit_command( struct i2c_command *cmd) { struct dce_i2c_hw *dce_i2c_hw; - struct dce_i2c_sw *dce_i2c_sw; + struct dce_i2c_sw dce_i2c_sw = {0}; if (!ddc) { BREAK_TO_DEBUGGER(); @@ -43,18 +43,15 @@ bool dce_i2c_submit_command( return false; } - /* The software engine is only available on dce8 */ - dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc); - - if (!dce_i2c_sw) { - dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); - - if (!dce_i2c_hw) - return false; + dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); + if (dce_i2c_hw) return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw); - } - return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw); + dce_i2c_sw.ctx = ddc->ctx; + if (dce_i2c_engine_acquire_sw(&dce_i2c_sw, ddc)) { + return 
dce_i2c_submit_command_sw(pool, ddc, cmd, &dce_i2c_sw); + } + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index aad7b52165be..1cd4d8fc361f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -296,9 +296,7 @@ static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t reset_length = 0; -#endif /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); @@ -322,14 +320,12 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 2, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) } else { reset_length = dce_i2c_hw->send_reset_length; REG_UPDATE_N(SETUP, 3, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); -#endif } /* Program HW priority * set to High - interrupt software I2C at any time @@ -705,7 +701,6 @@ void dcn1_i2c_hw_construct( dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, @@ -724,4 +719,3 @@ void dcn2_i2c_hw_construct( if (ctx->dc->debug.scl_reset_length10) dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index cb0234e5d597..d4b2037f7d74 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -177,9 +177,7 @@ struct dce_i2c_shift { uint8_t DC_I2C_INDEX; uint8_t DC_I2C_INDEX_WRITE; uint8_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint8_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif uint8_t DC_I2C_REG_RW_CNTL_STATUS; }; @@ -220,17 +218,13 @@ struct dce_i2c_mask { uint32_t DC_I2C_INDEX; uint32_t DC_I2C_INDEX_WRITE; uint32_t XTAL_REF_DIV; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t DC_I2C_DDC1_SEND_RESET_LENGTH; -#endif uint32_t DC_I2C_REG_RW_CNTL_STATUS; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define I2C_COMMON_MASK_SH_LIST_DCN2(mask_sh)\ I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh),\ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH, mask_sh) -#endif struct dce_i2c_registers { uint32_t SETUP; @@ -312,7 +306,6 @@ void dcn1_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn2_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, @@ -320,7 +313,6 @@ void dcn2_i2c_hw_construct( const struct dce_i2c_registers *regs, const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks); -#endif bool dce_i2c_submit_command_hw( struct resource_pool *pool, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index a5a11c251e25..87d8428df6c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -73,31 +73,6 @@ static void release_engine_dce_sw( dce_i2c_sw->ddc = NULL; } -static bool get_hw_supported_ddc_line( - struct ddc *ddc, - enum gpio_ddc_line *line) -{ - enum gpio_ddc_line line_found; - 
- *line = GPIO_DDC_LINE_UNKNOWN; - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return false; - } - - if (!ddc->hw_info.hw_supported) - return false; - - line_found = dal_ddc_get_line(ddc); - - if (line_found >= GPIO_DDC_LINE_COUNT) - return false; - - *line = line_found; - - return true; -} static bool wait_for_scl_high_sw( struct dc_context *ctx, struct ddc *ddc, @@ -524,21 +499,3 @@ bool dce_i2c_submit_command_sw( return result; } -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( - struct resource_pool *pool, - struct ddc *ddc) -{ - enum gpio_ddc_line line; - struct dce_i2c_sw *engine = NULL; - - if (get_hw_supported_ddc_line(ddc, &line)) - engine = pool->sw_i2cs[line]; - - if (!engine) - return NULL; - - if (!dce_i2c_engine_acquire_sw(engine, ddc)) - return NULL; - - return engine; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h index 5bbcdd455614..019fc47bb767 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h @@ -49,9 +49,9 @@ bool dce_i2c_submit_command_sw( struct i2c_command *cmd, struct dce_i2c_sw *dce_i2c_sw); -struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( - struct resource_pool *pool, - struct ddc *ddc); +bool dce_i2c_engine_acquire_sw( + struct dce_i2c_sw *dce_i2c_sw, + struct ddc *ddc_handle); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 31b698bf9cfc..8aa937f496c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif( } if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } @@ -636,11 +636,11 @@ static void dce_mi_free_dmif( 10, 3500); if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 
0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 6ed922a3c1cd..451574971b96 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -137,7 +137,7 @@ static void dce110_update_generic_info_packet( AFMT_GENERIC0_UPDATE, (packet_index == 0), AFMT_GENERIC2_UPDATE, (packet_index == 2)); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(AFMT_VBI_PACKET_CONTROL1)) { switch (packet_index) { case 0: @@ -231,7 +231,7 @@ static void dce110_update_hdmi_info_packet( HDMI_GENERIC1_SEND, send, HDMI_GENERIC1_LINE, line); break; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case 4: if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, @@ -275,9 +275,10 @@ static void dce110_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t h_active_start; uint32_t v_active_start; uint32_t misc0 = 0; @@ -329,7 +330,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (enc110->se_mask->DP_VID_N_MUL) REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); #endif @@ -340,7 +341,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(DP_MSA_MISC)) misc1 = REG_READ(DP_MSA_MISC); #endif @@ -374,7 +375,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( /* set dynamic range and YCbCr range */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: colorimetry_bpc = 0; @@ -454,7 +455,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( DP_DYN_RANGE, dynamic_range_rgb, DP_YCBCR_RANGE, dynamic_range_ycbcr); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (REG(DP_MSA_COLORIMETRY)) REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); @@ -489,7 +490,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( hw_crtc_timing.v_front_porch; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* start at begining of left border */ if (REG(DP_MSA_TIMING_PARAM2)) REG_SET_2(DP_MSA_TIMING_PARAM2, 0, @@ -786,7 +787,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets( dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) if (enc110->se_mask->HDMI_DB_DISABLE) { /* for bring up, disable dp double TODO */ if (REG(HDMI_DB_CONTROL)) @@ -824,7 +825,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC1_LINE, 0, HDMI_GENERIC1_SEND, 0); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* stop generic packets 2 & 3 on HDMI */ if (REG(HDMI_GENERIC_PACKET_CONTROL2)) REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, diff --git 
a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index 799d36299c9b..753cb8edd996 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c @@ -26,7 +26,6 @@ #include "dc.h" #include "core_types.h" #include "clk_mgr.h" -#include "hw_sequencer.h" #include "dce100_hw_sequencer.h" #include "resource.h" @@ -136,7 +135,7 @@ void dce100_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index a6b80fdaa666..34518da20009 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE100_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; struct dc_state; diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 76d54885374a..8f78bf9abbca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = { .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) @@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + 
ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -679,7 +725,7 @@ void dce100_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce100_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -839,7 +885,7 @@ static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce100_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -904,7 +950,7 @@ static const struct resource_funcs dce100_res_pool_funcs = { .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link }; -static bool construct( +static bool dce100_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -997,6 +1043,8 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; + dc->caps.extended_aux_timeout_support = false; + for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( @@ -1074,7 +1122,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce100_resource_destruct(pool); return false; } @@ -1089,7 +1137,7 @@ struct resource_pool *dce100_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce100_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 01a924bf477a..4939cf3b316f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -61,6 +61,8 @@ #include "atomfirmware.h" +#define GAMMA_HW_POINTS_NUM 256 + /* * All values are in milliseconds; * For eDP, after power-up/power/down, @@ -268,7 +270,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params, } static bool -dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; @@ -596,7 +598,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, } static bool -dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { struct transform *xfm = pipe_ctx->plane_res.xfm; @@ -651,10 +653,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = pipe_ctx->stream->link->cur_link_settings.lane_count; - struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; struct dc_link *link = pipe_ctx->stream->link; - + const struct dc *dc = link->dc; uint32_t active_total_with_borders; uint32_t early_control = 0; @@ -667,7 +668,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - link->dc->hwss.update_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = @@ -943,27 +944,23 @@ void dce110_edp_backlight_control( void 
dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc; - struct pp_smu_funcs *pp_smu = NULL; + struct dc *dc; struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; if (!pipe_ctx->stream) return; - core_dc = pipe_ctx->stream->ctx->dc; - clk_mgr = core_dc->clk_mgr; + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; - if (core_dc->res_pool->pp_smu) - pp_smu = core_dc->res_pool->pp_smu; - if (pipe_ctx->stream_res.audio) { for (i = 0; i < MAX_PIPES; i++) { /*current_state not updated yet*/ - if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) + if (dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL) num_audio++; } @@ -984,7 +981,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) { struct dc *dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; if (!pipe_ctx || !pipe_ctx->stream) @@ -1001,9 +997,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; - if (dc->res_pool->pp_smu) - pp_smu = dc->res_pool->pp_smu; - if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1055,6 +1048,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; /* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; @@ -1064,7 +1058,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params); if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } @@ -1072,9 +1066,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, false); + hws->funcs.edp_backlight_control(link, false); dc_link_set_abm_disable(link); } @@ -1169,8 +1164,9 @@ static void build_audio_output( } } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (state->clk_mgr && + (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { audio_output->pll_info.dp_dto_source_clock_in_khz = state->clk_mgr->funcs->get_dp_ref_clk_frequency( state->clk_mgr); @@ -1230,7 +1226,7 @@ static void program_scaler(const struct dc *dc, { struct tg_color color = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* TOFPGA */ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL) return; @@ -1329,12 +1325,11 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc_stream_state *stream = pipe_ctx->stream; struct drr_params params = {0}; unsigned int event_triggers = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pipe_ctx *odm_pipe =
pipe_ctx->next_odm_pipe; -#endif + struct dce_hwseq *hws = dc->hwseq; - if (dc->hwss.disable_stream_gating) { - dc->hwss.disable_stream_gating(dc, pipe_ctx); + if (hws->funcs.disable_stream_gating) { + hws->funcs.disable_stream_gating(dc, pipe_ctx); } if (pipe_ctx->stream_res.audio != NULL) { @@ -1364,10 +1359,10 @@ static enum dc_status apply_single_controller_ctx_to_hw( /* */ /* Do not touch stream timing on seamless boot optimization. */ if (!pipe_ctx->stream->apply_seamless_boot_optimization) - dc->hwss.enable_stream_timing(pipe_ctx, context, dc); + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); - if (dc->hwss.setup_vupdate_interrupt) - dc->hwss.setup_vupdate_interrupt(pipe_ctx); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1397,7 +1392,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) while (odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( odm_pipe->stream_res.opp, @@ -1411,14 +1405,13 @@ static enum dc_status apply_single_controller_ctx_to_hw( &stream->clamping); odm_pipe = odm_pipe->next_odm_pipe; } -#endif if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - pipe_ctx->stream->link->psr_enabled = false; + pipe_ctx->stream->link->psr_feature_enabled = false; return DC_OK; } @@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { int i; - enum connector_id connector_id; - enum signal_type signal = SIGNAL_TYPE_NONE; /* do not know BIOS back-front mapping, simply blank all. 
It will not * hurt for non-DP @@ -1440,15 +1431,15 @@ static void power_down_encoders(struct dc *dc) } for (i = 0; i < dc->link_count; i++) { - connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id); - if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) || - (connector_id == CONNECTOR_ID_EDP)) { + enum signal_type signal = dc->links[i]->connector_signal; + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); - if (connector_id == CONNECTOR_ID_EDP) - signal = SIGNAL_TYPE_EDP; - } + + if (signal != SIGNAL_TYPE_EDP) + signal = SIGNAL_TYPE_NONE; dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); @@ -1529,18 +1520,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context) return NULL; } -static struct dc_link *get_edp_link(struct dc *dc) -{ - int i; - - // report any eDP links, even unconnected DDI's - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) - return dc->links[i]; - } - return NULL; -} - static struct dc_link *get_edp_link_with_sink( struct dc *dc, struct dc_state *context) @@ -1576,9 +1555,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + struct dce_hwseq *hws = dc->hwseq; - if (dc->hwss.init_pipes) - dc->hwss.init_pipes(dc, context); + if (hws->funcs.init_pipes) + hws->funcs.init_pipes(dc, context); edp_stream = get_edp_stream(context); @@ -1615,7 +1595,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ - dc->hwss.edp_backlight_control(edp_link_with_sink, false); + hws->funcs.edp_backlight_control(edp_link_with_sink, false); } /*resume from S3, no vbios posting, no need to power down again*/ power_down_all_hw_blocks(dc); @@ -1834,7 +1814,7 @@ static bool should_enable_fbc(struct dc *dc, return false; /* PSR should not be enabled */ - if (pipe_ctx->stream->link->psr_enabled) + if (pipe_ctx->stream->link->psr_feature_enabled) return false; /* Nothing to compress */ @@ -2030,13 +2010,14 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; /* Reset old context */ /* look up the targets that have been removed since last commit */ - dc->hwss.reset_hw_ctx_wrap(dc, context); + hws->funcs.reset_hw_ctx_wrap(dc, context); /* Skip applying if no targets */ if (context->stream_count <= 0) @@ -2061,7 +2042,7 @@ enum dc_status dce110_apply_ctx_to_hw( continue; } - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, dc->ctx->dc_bios, PIPE_GATING_CONTROL_DISABLE); } @@ -2370,19 +2351,20 @@ static void init_hw(struct dc *dc) struct transform *xfm; struct abm *abm; struct dmcu *dmcu; + struct dce_hwseq *hws = dc->hwseq; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { xfm = dc->res_pool->transforms[i]; xfm->funcs->transform_reset(xfm); - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, bp, PIPE_GATING_CONTROL_INIT); - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, i, bp, 
PIPE_GATING_CONTROL_DISABLE); - dc->hwss.enable_display_pipe_clock_gating( + hws->funcs.enable_display_pipe_clock_gating( dc->ctx, true); } @@ -2464,17 +2446,15 @@ static void dce110_program_front_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx) { struct mem_input *mi = pipe_ctx->plane_res.mi; - struct pipe_ctx *old_pipe = NULL; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; unsigned int i; + struct dce_hwseq *hws = dc->hwseq; + DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); - if (dc->current_state) - old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; - memset(&adjust, 0, sizeof(adjust)); adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; @@ -2530,10 +2510,10 @@ static void dce110_program_front_end_for_pipe( if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); DC_LOG_SURFACE( "Pipe:%d %p: addr hi:0x%x, " @@ -2636,6 +2616,7 @@ static void dce110_apply_ctx_for_surface( static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx; @@ -2643,7 +2624,7 @@ static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream) return; - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE); dc->res_pool->transforms[fe_idx]->funcs->transform_reset( @@ -2732,14 +2713,10 @@ static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, .program_output_csc = program_output_csc, .init_hw = init_hw, - .init_pipes = init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, .update_plane_addr = update_plane_addr, .update_pending_status = dce110_update_pending_status, - .set_input_transfer_func = dce110_set_input_transfer_func, - .set_output_transfer_func = dce110_set_output_transfer_func, - .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dce110_enable_timing_synchronization, .enable_per_frame_crtc_position_reset = dce110_enable_per_frame_crtc_position_reset, @@ -2750,8 +2727,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { .blank_stream = dce110_blank_stream, .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, - .enable_display_power_gating = dce110_enable_display_power_gating, .disable_plane = dce110_power_down_fe, .pipe_control_lock = dce_pipe_control_lock, .prepare_bandwidth = dce110_prepare_bandwidth, @@ -2759,22 +2734,33 @@ static const struct hw_sequencer_funcs dce110_funcs = { .set_drr = set_drr, .get_position = get_position, .set_static_screen_control = set_static_screen_control, - .reset_hw_ctx_wrap = 
dce110_reset_hw_ctx_wrap, - .enable_stream_timing = dce110_enable_stream_timing, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, .setup_stereo = NULL, .set_avmute = dce110_set_avmute, .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .set_cursor_position = dce110_set_cursor_position, .set_cursor_attribute = dce110_set_cursor_attribute }; +static const struct hwseq_private_funcs dce110_private_funcs = { + .init_pipes = init_pipes, + .update_plane_addr = update_plane_addr, + .set_input_transfer_func = dce110_set_input_transfer_func, + .set_output_transfer_func = dce110_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating, + .enable_display_power_gating = dce110_enable_display_power_gating, + .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, + .enable_stream_timing = dce110_enable_stream_timing, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .edp_backlight_control = dce110_edp_backlight_control, +}; + void dce110_hw_sequencer_construct(struct dc *dc) { dc->hwss = dce110_funcs; + dc->hwseq->funcs = dce110_private_funcs; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index 2f9b7dbdf415..26a9c14a58b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -27,8 +27,8 @@ #define __DC_HWSS_DCE110_H__ #include "core_types.h" +#include "hw_sequencer_private.h" -#define GAMMA_HW_POINTS_NUM 256 struct dc; struct dc_state; struct dm_pp_display_configuration; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 89620adc81d8..bf14e9ab040c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ @@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + 
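The recurring edit through the dce1xx files — dc->hwss.X calls becoming dc->hwseq->funcs.X, plus the new dce110_private_funcs table above — splits the hardware sequencer interface into a public vtable (hwss) and a private one (hwseq->funcs) reserved for sequencer-internal helpers such as edp_backlight_control. A compressed, self-contained sketch of that wiring pattern, with invented names in place of the real DC types:

#include <stdio.h>

struct dc_sketch;

struct hwss_public {            /* stays on dc->hwss */
    void (*enable_stream)(struct dc_sketch *dc);
};

struct hwseq_private {          /* moves to dc->hwseq->funcs */
    void (*edp_backlight_control)(struct dc_sketch *dc, int on);
};

struct hwseq_sketch {
    struct hwseq_private funcs;
};

struct dc_sketch {
    struct hwss_public hwss;
    struct hwseq_sketch *hwseq;
};

static void backlight_sketch(struct dc_sketch *dc, int on)
{
    (void)dc;
    printf("backlight %s\n", on ? "on" : "off");
}

static void enable_stream_sketch(struct dc_sketch *dc)
{
    /* internal helpers are reached through the private table now */
    dc->hwseq->funcs.edp_backlight_control(dc, 1);
}

/* construct-time wiring, mirroring dce110_hw_sequencer_construct() */
static void construct_sketch(struct dc_sketch *dc, struct hwseq_sketch *hws)
{
    dc->hwseq = hws;
    dc->hwss.enable_stream = enable_stream_sketch;
    dc->hwseq->funcs.edp_backlight_control = backlight_sketch;
}

int main(void)
{
    struct hwseq_sketch hws = {0};
    struct dc_sketch dc = {0};

    construct_sketch(&dc, &hws);
    dc.hwss.enable_stream(&dc);
    return 0;
}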
dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -736,7 +782,7 @@ void dce110_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce110_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -1051,6 +1097,7 @@ static struct pipe_ctx *dce110_acquire_underlay( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; + struct dce_hwseq *hws = dc->hwseq; struct resource_context *res_ctx = &context->res_ctx; unsigned int underlay_idx = pool->underlay_pipe_index; struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; @@ -1071,7 +1118,7 @@ static struct pipe_ctx *dce110_acquire_underlay( struct tg_color black_color = {0}; struct dc_bios *dcb = dc->ctx->dc_bios; - dc->hwss.enable_display_power_gating( + hws->funcs.enable_display_power_gating( dc, pipe_ctx->stream_res.tg->inst, dcb, PIPE_GATING_CONTROL_DISABLE); @@ -1115,7 +1162,7 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce110_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1267,7 +1314,7 @@ const struct resource_caps *dce110_resource_cap( return &carrizo_resource_cap; } -static bool construct( +static bool dce110_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool, @@ -1293,6 +1340,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * @@ -1445,7 +1493,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce110_resource_destruct(pool); return false; } @@ -1460,7 +1508,7 @@ struct resource_pool *dce110_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool, asic_id)) + if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c index 1e4a7c13f0ed..19873ee1f78d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c @@ -158,6 +158,6 @@ void dce112_hw_sequencer_construct(struct dc *dc) * structure */ dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce112_enable_display_power_gating; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h index e646f4a37fa2..943f1b2c5b2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h @@ 
-27,6 +27,7 @@ #define __DC_HWSS_DCE112_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 21a657e79306..700ad8b3e54b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ @@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -698,7 +744,7 @@ void dce112_clock_source_destroy(struct clock_source **clk_src) *clk_src = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce112_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -967,7 +1013,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce112_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -1140,7 +1186,7 @@ const struct resource_caps *dce112_resource_cap( return &polaris_10_resource_cap; } -static bool construct( +static bool dce112_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1163,7 +1209,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; - + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * @@ -1326,7 +1372,7 @@ static bool construct( return true; res_create_fail: - destruct(pool); + dce112_resource_destruct(pool); return false; } @@ 
-1340,7 +1386,7 @@ struct resource_pool *dce112_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce112_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 1ca30928025e..66a13aa39c95 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -265,7 +265,7 @@ void dce120_hw_sequencer_construct(struct dc *dc) * structure */ dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce120_enable_display_power_gating; dc->hwss.update_dchub = dce120_update_dchub; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h index c51afbd0b012..bc024534732f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE120_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 7c52f7f9196c..53ab88ef71f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -63,8 +63,8 @@ #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" -#include "mmhub/mmhub_9_4_0_offset.h" -#include "mmhub/mmhub_9_4_0_sh_mask.h" +#include "mmhub/mmhub_1_0_offset.h" +#include "mmhub/mmhub_1_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" @@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE12_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE12_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ @@ -356,6 +364,37 @@ static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ @@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -545,7 +587,7 @@ static void dce120_transform_destroy(struct transform **xfm) *xfm = NULL; } -static void destruct(struct dce110_resource_pool *pool) +static void dce120_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ 
-655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); @@ -826,7 +872,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce120_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -978,7 +1024,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static bool construct( +static bool dce120_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dce110_resource_pool *pool) @@ -1006,7 +1052,7 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.psp_setup_panel_mode = true; - + dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* @@ -1191,7 +1237,7 @@ controller_create_fail: clk_src_create_fail: res_create_fail: - destruct(pool); + dce120_resource_destruct(pool); return false; } @@ -1206,7 +1252,7 @@ struct resource_pool *dce120_create_resource_pool( if (!pool) return NULL; - if (construct(num_virtual_links, dc, pool)) + if (dce120_resource_construct(num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index c4543178ba20..893261c81854 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -74,7 +74,7 @@ void dce80_hw_sequencer_construct(struct dc *dc) { dce110_hw_sequencer_construct(dc); - dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h index 7a1b31def66f..e43af832d00c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h @@ -27,6 +27,7 @@ #define __DC_HWSS_DCE80_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 643ccb0ade00..2ad5c28c6e66 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ @@ -431,6 +439,37 @@ static const struct 
dce_abm_mask abm_mask = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -727,7 +773,7 @@ static struct input_pixel_processor *dce80_ipp_create( return &ipp->base; } -static void destruct(struct dce110_resource_pool *pool) +static void dce80_resource_destruct(struct dce110_resource_pool *pool) { unsigned int i; @@ -855,7 +901,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - destruct(dce110_pool); + dce80_resource_destruct(dce110_pool); kfree(dce110_pool); *pool = NULL; } @@ -895,6 +941,7 @@ static bool dce80_construct( dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * @@ -1046,7 +1093,7 @@ static bool dce80_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } @@ -1243,7 +1290,7 @@ static bool dce81_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } @@ -1436,7 +1483,7 @@ static bool dce83_construct( return true; res_create_fail: - destruct(pool); + dce80_resource_destruct(pool); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..62ad1a11bff9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,7 +22,8 @@ # # Makefile for DCN. 
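The map_transmitter_id_to_phy_instance() helper duplicated into the dce100/110/112/120/80 resource files above is a seven-case switch in which every break follows a return and is therefore unreachable. If the TRANSMITTER_UNIPHY_A..G enumerators are contiguous — an assumption here, since the real enum is defined elsewhere in the DC headers — the same mapping reduces to arithmetic, sketched standalone below:

#include <assert.h>

/* Stand-in enum; the real one lives in the DC headers and has more members. */
enum transmitter_sketch {
    TRANSMITTER_UNIPHY_A,
    TRANSMITTER_UNIPHY_B,
    TRANSMITTER_UNIPHY_C,
    TRANSMITTER_UNIPHY_D,
    TRANSMITTER_UNIPHY_E,
    TRANSMITTER_UNIPHY_F,
    TRANSMITTER_UNIPHY_G,
};

static int map_transmitter_sketch(enum transmitter_sketch t)
{
    if (t <= TRANSMITTER_UNIPHY_G)
        return (int)t - (int)TRANSMITTER_UNIPHY_A;
    assert(0); /* mirrors the ASSERT(0) fallback in the patch */
    return 0;
}

int main(void)
{
    assert(map_transmitter_sketch(TRANSMITTER_UNIPHY_E) == 4);
    return 0;
}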
-DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ +DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ + dcn10_hw_sequencer_debug.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 01c7e30b9ce1..bbd6e01b3eca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + // All 3 color channels have same x corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); @@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format( i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); @@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); corner_points[0].green.x = corner_points[0].red.x; @@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format( i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index d8b2da18db39..0e682b5aa3eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) -static bool dpp_get_optimal_number_of_taps( +bool dpp1_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) @@ -290,12 +290,8 @@ void dpp1_cnv_setup ( enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space 
input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut) -#else - enum dc_color_space input_color_space) -#endif { uint32_t pixel_format; uint32_t alpha_en; @@ -521,7 +517,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_read_state = dpp_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, .dpp_set_csc_default = dpp1_cm_set_output_csc_default, @@ -542,11 +538,9 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .dpp_program_blnd_lut = NULL, .dpp_program_shaper_lut = NULL, .dpp_program_3dlut = NULL -#endif }; static struct dpp_caps dcn10_dpp_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index e2c613611ac9..2edf566b3a72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1486,12 +1486,8 @@ void dpp1_cnv_setup ( enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else - enum dc_color_space input_color_space); -#endif void dpp1_full_bypass(struct dpp *dpp_base); @@ -1504,6 +1500,11 @@ void dpp1_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + void dpp1_construct(struct dcn10_dpp *dpp1, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index aa0c7a7d13a0..935c892622a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -352,6 +352,8 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, uint32_t i; struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + REG_SEQ_START(); + for (i = 0 ; i < num; i++) { REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); @@ -626,10 +628,16 @@ void dpp1_set_degamma( case IPP_DEGAMMA_MODE_HW_xvYCC: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; default: BREAK_TO_DEBUGGER(); break; } + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); } void dpp1_degamma_ram_select( @@ -731,10 +739,8 @@ void dpp1_full_bypass(struct dpp *dpp_base) /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ if (dpp->tf_mask->CM_BYPASS_EN) REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); -#endif /* Setting degamma bypass for now */ REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index d67e0abeee93..fce37c527a0b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -218,14 +218,12 @@ static void dpp1_dscl_set_lb( INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) else { /* DSCL caps: pixel data processed in float format */ REG_SET_2(LB_DATA_FORMAT, 0, INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ } -#endif REG_SET_2(LB_MEMORY_CTRL, 0, MEMORY_CONFIG, mem_size_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c index 374cc9acda3b..b6391a5ead78 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c @@ -23,7 +23,7 @@ * */ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "reg_helper.h" #include "resource.h" @@ -109,9 +109,7 @@ const struct dwbc_funcs dcn10_dwbc_funcs = { .update = NULL, .set_stereo = NULL, .set_new_content = NULL, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .set_warmup = NULL, -#endif .dwb_set_scaler = NULL, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h index c175edd0bae7..d56ea7c8171e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h @@ -24,7 +24,7 @@ #ifndef __DC_DWBC_DCN10_H__ #define __DC_DWBC_DCN10_H__ -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN */ #define BASE_INNER(seg) \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 69d903d68661..af57751253de 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -121,7 +121,6 @@ struct dcn_hubbub_registers { uint32_t DCN_VM_AGP_BASE; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; @@ -140,7 +139,6 @@ struct dcn_hubbub_registers { uint32_t DCHVM_CLK_CTRL; uint32_t DCHVM_RIOMMU_CTRL0; uint32_t DCHVM_RIOMMU_STAT0; -#endif }; /* set field name */ @@ -232,7 +230,6 @@ struct dcn_hubbub_registers { type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define HUBBUB_HVM_REG_FIELD_LIST(type) \ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ @@ -278,22 +275,17 @@ struct dcn_hubbub_registers { type HOSTVM_POWERSTATUS; \ type RIOMMU_ACTIVE; \ type HOSTVM_PREFETCH_DONE -#endif struct dcn_hubbub_shift { DCN_HUBBUB_REG_FIELD_LIST(uint8_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) HUBBUB_HVM_REG_FIELD_LIST(uint8_t); -#endif }; struct dcn_hubbub_mask { DCN_HUBBUB_REG_FIELD_LIST(uint32_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) HUBBUB_HVM_REG_FIELD_LIST(uint32_t); -#endif }; struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 001db49e4bb2..4d1301e5eaf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -306,7 +306,6 @@ void hubp1_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 12); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 112); @@ -327,7 +326,6 @@ void hubp1_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 119); break; -#endif default: BREAK_TO_DEBUGGER(); break; @@ -812,7 +810,8 @@ static void hubp1_set_vm_context0_settings(struct hubp *hubp, void min_set_viewport( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c) + const struct rect *viewport_c, + enum dc_rotation_angle rotation) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); @@ -841,6 +840,14 @@ void min_set_viewport( REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, PRI_VIEWPORT_X_START_C, viewport_c->x, PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); } void hubp1_read_state_common(struct hubp *hubp) @@ -1006,6 +1013,9 @@ void hubp1_read_state_common(struct hubp *hubp) HUBP_TTU_DISABLE, &s->ttu_disable, HUBP_UNDERFLOW_STATUS, &s->underflow_status); + REG_GET(HUBP_CLK_CNTL, + HUBP_CLOCK_ENABLE, &s->clock_en); + REG_GET(DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, &s->min_ttu_vblank); @@ -1240,10 +1250,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_get_underflow_status = hubp1_get_underflow_status, .hubp_init = hubp1_init, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .dmdata_set_attributes = NULL, .dmdata_load = NULL, -#endif }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index cb20d10288c0..e44eaae5033b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -47,6 +47,8 @@ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ @@ -57,8 +59,12 @@ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\ @@ -150,6 +156,8 @@ uint32_t DCSURF_SEC_VIEWPORT_START; \ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \ uint32_t DCSURF_PRI_VIEWPORT_START_C; \ + uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \ + uint32_t DCSURF_SEC_VIEWPORT_START_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \ 
uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \ @@ -160,8 +168,12 @@ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \ uint32_t DCSURF_SURFACE_INUSE; \ uint32_t DCSURF_SURFACE_INUSE_HIGH; \ uint32_t DCSURF_SURFACE_INUSE_C; \ @@ -279,6 +291,10 @@ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\ @@ -289,8 +305,12 @@ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\ @@ -469,6 +489,10 @@ type PRI_VIEWPORT_HEIGHT_C; \ type PRI_VIEWPORT_X_START_C; \ type PRI_VIEWPORT_Y_START_C; \ + type SEC_VIEWPORT_WIDTH_C; \ + type SEC_VIEWPORT_HEIGHT_C; \ + type SEC_VIEWPORT_X_START_C; \ + type SEC_VIEWPORT_Y_START_C; \ type PRIMARY_SURFACE_ADDRESS_HIGH;\ type PRIMARY_SURFACE_ADDRESS;\ type SECONDARY_SURFACE_ADDRESS_HIGH;\ @@ -479,8 +503,12 @@ type SECONDARY_META_SURFACE_ADDRESS;\ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_SURFACE_ADDRESS_C;\ + type SECONDARY_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_SURFACE_ADDRESS_C;\ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_META_SURFACE_ADDRESS_C;\ + type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_META_SURFACE_ADDRESS_C;\ type SURFACE_INUSE_ADDRESS;\ type SURFACE_INUSE_ADDRESS_HIGH;\ type SURFACE_INUSE_ADDRESS_C;\ @@ -642,6 +670,7 @@ struct dcn_hubp_state { uint32_t 
sw_mode; uint32_t dcc_en; uint32_t blank_en; + uint32_t clock_en; uint32_t underflow_status; uint32_t ttu_disable; uint32_t min_ttu_vblank; @@ -700,13 +729,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable, enum hubp_ind_block_size independent_64b_blks); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hubp1_program_surface_flip_and_addr( struct hubp *hubp, const struct dc_plane_address *address, bool flip_immediate); -#endif bool hubp1_is_flip_pending(struct hubp *hubp); void hubp1_cursor_set_attributes( @@ -722,7 +749,9 @@ void hubp1_set_blank(struct hubp *hubp, bool blank); void min_set_viewport(struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c); + const struct rect *viewport_c, + enum dc_rotation_angle rotation); +/* rotation angle added for use by hubp21_set_viewport */ void hubp1_clk_cntl(struct hubp *hubp, bool enable); void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 60123db7ba02..3996fef56948 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -25,17 +25,18 @@ #include <linux/delay.h> #include "dm_services.h" +#include "basics/dc_common.h" #include "core_types.h" #include "resource.h" #include "custom_float.h" #include "dcn10_hw_sequencer.h" -#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer_debug.h" #include "dce/dce_hwseq.h" #include "abm.h" #include "dmcu.h" #include "dcn10_optc.h" -#include "dcn10/dcn10_dpp.h" -#include "dcn10/dcn10_mpc.h" +#include "dcn10_dpp.h" +#include "dcn10_mpc.h" #include "timing_generator.h" #include "opp.h" #include "ipp.h" @@ -49,9 +50,7 @@ #include "clk_mgr.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dsc.h" -#endif #define DC_LOGGER_INIT(logger) @@ -68,6 +67,8 @@ #define DTN_INFO_MICRO_SEC(ref_cycle) \ print_microsec(dc_ctx, log_ctx, ref_cycle) +#define GAMMA_HW_POINTS_NUM 256 + void print_microsec(struct dc_context *dc_ctx, struct dc_log_buffer_ctx *log_ctx, uint32_t ref_cycle) @@ -81,6 +82,33 @@ void print_microsec(struct dc_context *dc_ctx, us_x10 % frac); } +static void dcn10_lock_all_pipes(struct dc *dc, + struct dc_state *context, + bool lock) +{ + struct pipe_ctx *pipe_ctx; + struct timing_generator *tg; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + tg = pipe_ctx->stream_res.tg; + /* + * Only lock the top pipe's tg to prevent redundant + * (un)locking. Also skip if pipe is disabled. 
+ */ + if (pipe_ctx->top_pipe || + !pipe_ctx->stream || !pipe_ctx->plane_state || + !tg->funcs->is_tg_enabled(tg)) + continue; + + if (lock) + tg->funcs->lock(tg); + else + tg->funcs->unlock(tg); + } +} + static void log_mpc_crc(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) { @@ -129,9 +157,8 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) struct resource_pool *pool = dc->res_pool; int i; - DTN_INFO("HUBP: format addr_hi width height" - " rot mir sw_mode dcc_en blank_en ttu_dis underflow" - " min_ttu_vblank qos_low_wm qos_high_wm\n"); + DTN_INFO( + "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n"); for (i = 0; i < pool->pipe_count; i++) { struct hubp *hubp = pool->hubps[i]; struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); @@ -139,8 +166,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) hubp->funcs->hubp_read_state(hubp); if (!s->blank_en) { - DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" - " %6d %8d %7d %8xh", + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh", hubp->inst, s->pixel_format, s->inuse_addr_hi, @@ -151,6 +177,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) s->sw_mode, s->dcc_en, s->blank_en, + s->clock_en, s->ttu_disable, s->underflow_status); DTN_INFO_MICRO_SEC(s->min_ttu_vblank); @@ -308,21 +335,31 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); - DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel" - " h_bs h_be h_ss h_se hpol htot vtot underflow\n"); + DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n"); for (i = 0; i < pool->timing_generator_count; i++) { struct timing_generator *tg = pool->timing_generators[i]; struct dcn_otg_state s = {0}; - + /* Read shared OTG state registers for all DCNx */ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + /* + * For DCN2 and greater, a register on the OPP is used to + * determine if the CRTC is blanked instead of the OTG. So use + * dpg_is_blanked() if exists, otherwise fallback on otg. + * + * TODO: Implement DCN-specific read_otg_state hooks. + */ + if (pool->opps[i]->funcs->dpg_is_blanked) + s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]); + else + s.blank_enabled = tg->funcs->is_blanked(tg); + //only print if OTG master is enabled if ((s.otg_enabled & 1) == 0) continue; - DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d" - " %5d %5d %5d %5d %9d\n", + DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n", tg->inst, s.v_blank_start, s.v_blank_end, @@ -340,7 +377,8 @@ void dcn10_log_hw_state(struct dc *dc, s.h_sync_a_pol, s.h_total, s.v_total, - s.underflow_occurred_status); + s.underflow_occurred_status, + s.blank_enabled); // Clear underflow for debug purposes // We want to keep underflow sticky bit on for the longevity tests outside of test environment. 
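Note on the blank-status logging added above: for DCN2 and newer the CRTC blank state lives on the OPP's display pattern generator rather than the OTG, so dcn10_log_hw_state() now prefers dpg_is_blanked() and only falls back to the OTG query. A minimal sketch of that dispatch, using only the two function pointers visible in this hunk; the crtc_is_blanked() wrapper name is hypothetical and not part of the patch:

	static bool crtc_is_blanked(struct output_pixel_processor *opp,
				    struct timing_generator *tg)
	{
		/* DCN2+: the OPP's DPG reports blanking; older DCN asks the OTG */
		if (opp->funcs->dpg_is_blanked)
			return opp->funcs->dpg_is_blanked(opp);

		return tg->funcs->is_blanked(tg);
	}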
@@ -350,7 +388,6 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n"); for (i = 0; i < pool->res_cap->num_dsc; i++) { struct display_stream_compressor *dsc = pool->dscs[i]; @@ -387,7 +424,7 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); - DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS\n"); + DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n"); for (i = 0; i < dc->link_count; i++) { struct link_encoder *lenc = dc->links[i]->link_enc; @@ -395,16 +432,16 @@ void dcn10_log_hw_state(struct dc *dc, if (lenc->funcs->read_state) { lenc->funcs->read_state(lenc, &s); - DTN_INFO("[%-3d]: %-12d %-22d %-22d\n", + DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n", i, s.dphy_fec_en, s.dphy_fec_ready_shadow, - s.dphy_fec_active_status); + s.dphy_fec_active_status, + s.dp_link_training_complete); DTN_INFO("\n"); } } DTN_INFO("\n"); -#endif DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n", @@ -438,7 +475,7 @@ bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx) return false; } -static void dcn10_enable_power_gating_plane( +void dcn10_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { @@ -460,7 +497,7 @@ static void dcn10_enable_power_gating_plane( REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); } -static void dcn10_disable_vga( +void dcn10_disable_vga( struct dce_hwseq *hws) { unsigned int in_vga1_mode = 0; @@ -493,7 +530,7 @@ static void dcn10_disable_vga( REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1); } -static void dcn10_dpp_pg_control( +void dcn10_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) @@ -545,7 +582,7 @@ static void dcn10_dpp_pg_control( } } -static void dcn10_hubp_pg_control( +void dcn10_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -605,8 +642,8 @@ static void power_on_plane( if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true); - hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true); + hws->funcs.dpp_pg_control(hws, plane_id, true); + hws->funcs.hubp_pg_control(hws, plane_id, true); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); DC_LOG_DEBUG( @@ -627,7 +664,7 @@ static void undo_DEGVIDCN10_253_wa(struct dc *dc) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.hubp_pg_control(hws, 0, false); + hws->funcs.hubp_pg_control(hws, 0, false); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -656,7 +693,7 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.hubp_pg_control(hws, 0, true); + hws->funcs.hubp_pg_control(hws, 0, true); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -664,12 +701,16 @@ static void apply_DEGVIDCN10_253_wa(struct dc *dc) hws->wa_state.DEGVIDCN10_253_applied = true; } -static void dcn10_bios_golden_init(struct dc *dc) +void dcn10_bios_golden_init(struct dc *dc) { + struct dce_hwseq *hws = dc->hwseq; struct dc_bios *bp = dc->ctx->dc_bios; int i; bool allow_self_fresh_force_enable = true; + if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc)) + return; + if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = 
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -728,7 +769,7 @@ static void false_optc_underflow_wa( tg->funcs->clear_optc_underflow(tg); } -static enum dc_status dcn10_enable_stream_timing( +enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -974,8 +1015,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; int dpp_id = pipe_ctx->plane_res.dpp->inst; struct mpc *mpc = dc->res_pool->mpc; @@ -1000,10 +1042,10 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_disconnect(hubp); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } -static void dcn10_plane_atomic_power_down(struct dc *dc, +void dcn10_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp) { @@ -1013,8 +1055,8 @@ static void dcn10_plane_atomic_power_down(struct dc *dc, if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dc->hwss.dpp_pg_control(hws, dpp->inst, false); - dc->hwss.hubp_pg_control(hws, hubp->inst, false); + hws->funcs.dpp_pg_control(hws, dpp->inst, false); + hws->funcs.hubp_pg_control(hws, hubp->inst, false); dpp->funcs->dpp_reset(dpp); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); @@ -1026,8 +1068,9 @@ static void dcn10_plane_atomic_power_down(struct dc *dc, /* disable HW used by plane. * note: cannot disable until disconnect is complete */ -static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; int opp_id = hubp->opp_id; @@ -1046,7 +1089,7 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->power_gated = true; dc->optimized_required = false; /* We're powering off, no need to optimize */ - dc->hwss.plane_atomic_power_down(dc, + hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.hubp); @@ -1058,14 +1101,15 @@ static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) return; - dc->hwss.plane_atomic_disable(dc, pipe_ctx); + hws->funcs.plane_atomic_disable(dc, pipe_ctx); apply_DEGVIDCN10_253_wa(dc); @@ -1073,9 +1117,10 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->pipe_idx); } -static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) +void dcn10_init_pipes(struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; bool can_apply_seamless_boot = false; for (i = 0; i < context->stream_count; i++) { @@ -1100,8 +1145,8 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) * command table. 
*/ if (tg->funcs->is_tg_enabled(tg)) { - if (dc->hwss.init_blank != NULL) { - dc->hwss.init_blank(dc, tg); + if (hws->funcs.init_blank != NULL) { + hws->funcs.init_blank(dc, tg); tg->funcs->lock(tg); } else { tg->funcs->lock(tg); @@ -1158,7 +1203,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - dc->hwss.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); @@ -1172,7 +1217,7 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) } } -static void dcn10_init_hw(struct dc *dc) +void dcn10_init_hw(struct dc *dc) { int i; struct abm *abm = dc->res_pool->abm; @@ -1204,15 +1249,15 @@ static void dcn10_init_hw(struct dc *dc) } //Enable ability to power gate / don't force power on permanently - dc->hwss.enable_power_gating_plane(hws, true); + hws->funcs.enable_power_gating_plane(hws, true); return; } if (!dcb->funcs->is_accelerated_mode(dcb)) - dc->hwss.disable_vga(dc->hwseq); + hws->funcs.disable_vga(dc->hwseq); - dc->hwss.bios_golden_init(dc); + hws->funcs.bios_golden_init(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; @@ -1254,11 +1299,9 @@ static void dcn10_init_hw(struct dc *dc) } /* Power gate DSCs */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (dc->hwss.dsc_pg_control != NULL) - dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); -#endif + if (hws->funcs.dsc_pg_control != NULL) + hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1267,7 +1310,7 @@ static void dcn10_init_hw(struct dc *dc) * everything down. 
*/ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - dc->hwss.init_pipes(dc, dc->current_state); + hws->funcs.init_pipes(dc, dc->current_state); } for (i = 0; i < res_pool->audio_count; i++) { @@ -1281,7 +1324,7 @@ static void dcn10_init_hw(struct dc *dc) abm->funcs->abm_init(abm); } - if (dmcu != NULL) + if (dmcu != NULL && !dmcu->auto_load_dmcu) dmcu->funcs->dmcu_init(dmcu); if (abm != NULL && dmcu != NULL) @@ -1299,14 +1342,19 @@ static void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - dc->hwss.enable_power_gating_plane(dc->hwseq, true); + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + + if (dc->clk_mgr->funcs->notify_wm_ranges) + dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); + } -static void dcn10_reset_hw_ctx_wrap( +void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -1325,8 +1373,8 @@ static void dcn10_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); - if (dc->hwss.enable_stream_gating) - dc->hwss.enable_stream_gating(dc, pipe_ctx); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -1359,9 +1407,7 @@ static bool patch_address_for_sbs_tb_stereo( return false; } - - -static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { bool addr_patched = false; PHYSICAL_ADDRESS_LOC addr; @@ -1386,8 +1432,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; } -static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state) +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; @@ -1419,6 +1465,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); break; case TRANSFER_FUNCTION_PQ: + dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); + cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); + result = true; + break; default: result = false; break; @@ -1452,21 +1503,20 @@ static void log_tf(struct dc_context *ctx, DC_LOG_ALL_TF_CHANNELS("Logging all channels..."); for (i = 0; i < hw_points_num; i++) { - DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) { - DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + 
DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } } -static bool -dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream) +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream) { struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -1502,11 +1552,13 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static void dcn10_pipe_control_lock( +void dcn10_pipe_control_lock( struct dc *dc, struct pipe_ctx *pipe, bool lock) { + struct dce_hwseq *hws = dc->hwseq; + /* use TG master update lock to lock everything on the TG * therefore only top pipe need to lock */ @@ -1514,7 +1566,7 @@ static void dcn10_pipe_control_lock( return; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (lock) pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); @@ -1522,7 +1574,7 @@ static void dcn10_pipe_control_lock( pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } static bool wait_for_reset_trigger_to_occur( @@ -1562,7 +1614,7 @@ static bool wait_for_reset_trigger_to_occur( return rc; } -static void dcn10_enable_timing_synchronization( +void dcn10_enable_timing_synchronization( struct dc *dc, int group_index, int group_size, @@ -1592,7 +1644,7 @@ static void dcn10_enable_timing_synchronization( DC_SYNC_INFO("Sync complete\n"); } -static void dcn10_enable_per_frame_crtc_position_reset( +void dcn10_enable_per_frame_crtc_position_reset( struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -1617,10 +1669,10 @@ static void dcn10_enable_per_frame_crtc_position_reset( } /*static void print_rq_dlg_ttu( - struct dc *core_dc, + struct dc *dc, struct pipe_ctx *pipe_ctx) { - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML TTU Output parameters [%d] ==============\n" "qos_level_low_wm: %d, \n" "qos_level_high_wm: %d, \n" @@ -1650,7 +1702,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML DLG Output parameters [%d] ==============\n" "refcyc_h_blank_end: %d, \n" "dlg_vblank_end: %d, \n" @@ -1685,7 +1737,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\ndst_y_per_meta_row_nom_l: %d, \n" "refcyc_per_meta_chunk_nom_l: %d, \n" "refcyc_per_line_delivery_pre_l: %d, \n" @@ -1715,7 +1767,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( pipe_ctx->dlg_regs.refcyc_per_line_delivery_c ); - DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger, + DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, "\n============== DML RQ Output parameters [%d] ==============\n" "chunk_size: %d \n" "min_chunk_size: %d \n" @@ -1830,7 +1882,7 @@ static void dcn10_enable_plane( struct dce_hwseq *hws = dc->hwseq; if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } undo_DEGVIDCN10_253_wa(dc); @@ -1887,11 +1939,11 @@ static void 
dcn10_enable_plane( dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp); if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } } -static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) { int i = 0; struct dpp_grph_csc_adjustment adjust; @@ -1939,7 +1991,7 @@ static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint matrix[11] = rgb_bias; } -static void dcn10_program_output_csc(struct dc *dc, +void dcn10_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, @@ -1971,57 +2023,6 @@ static void dcn10_program_output_csc(struct dc *dc, } } -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - return false; -} - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - return false; -} - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) -{ - if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) - return true; - if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe)) - return true; - if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe)) - return true; - return false; -} - -bool is_rgb_cspace(enum dc_color_space output_color_space) -{ - switch (output_color_space) { - case COLOR_SPACE_SRGB: - case COLOR_SPACE_SRGB_LIMITED: - case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_RGB_LIMITEDRANGE: - case COLOR_SPACE_ADOBERGB: - return true; - case COLOR_SPACE_YCBCR601: - case COLOR_SPACE_YCBCR709: - case COLOR_SPACE_YCBCR601_LIMITED: - case COLOR_SPACE_YCBCR709_LIMITED: - case COLOR_SPACE_2020_YCBCR: - return false; - default: - /* Add a case to switch */ - BREAK_TO_DEBUGGER(); - return false; - } -} - void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color) @@ -2095,70 +2096,7 @@ void dcn10_get_hdr_visual_confirm_color( } } -static uint16_t fixed_point_to_int_frac( - struct fixed31_32 arg, - uint8_t integer_bits, - uint8_t fractional_bits) -{ - int32_t numerator; - int32_t divisor = 1 << fractional_bits; - - uint16_t result; - - uint16_t d = (uint16_t)dc_fixpt_floor( - dc_fixpt_abs( - arg)); - - if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) - numerator = (uint16_t)dc_fixpt_floor( - dc_fixpt_mul_int( - arg, - divisor)); - else { - numerator = dc_fixpt_floor( - dc_fixpt_sub( - dc_fixpt_from_int( - 1LL << integer_bits), - dc_fixpt_recip( - dc_fixpt_from_int( - divisor)))); - } - - if (numerator >= 0) - result = (uint16_t)numerator; - else - result = (uint16_t)( - (1 << (integer_bits + fractional_bits + 1)) + numerator); - - if ((result != 0) && dc_fixpt_lt( - arg, dc_fixpt_zero)) - result |= 1 << (integer_bits + fractional_bits); - - return result; -} - -void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale, - const struct dc_plane_state *plane_state) -{ - if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN - && plane_state->format != SURFACE_PIXEL_FORMAT_INVALID - && plane_state->input_csc_color_matrix.enable_adjustment - && plane_state->coeff_reduction_factor.value != 0) { - 
bias_and_scale->scale_blue = fixed_point_to_int_frac( - dc_fixpt_mul(plane_state->coeff_reduction_factor, - dc_fixpt_from_fraction(256, 255)), - 2, - 13); - bias_and_scale->scale_red = bias_and_scale->scale_blue; - bias_and_scale->scale_green = bias_and_scale->scale_blue; - } else { - bias_and_scale->scale_blue = 0x2000; - bias_and_scale->scale_red = 0x2000; - bias_and_scale->scale_green = 0x2000; - } -} - -static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) +static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) { struct dc_bias_and_scale bns_params = {0}; @@ -2167,21 +2105,18 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 plane_state->color_space, NULL); -#else - plane_state->color_space); -#endif //set scale and bias registers - dcn10_build_prescale_params(&bns_params, plane_state); + build_prescale_params(&bns_params, plane_state); if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = {{0}}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; @@ -2191,10 +2126,10 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dcn10_get_hdr_visual_confirm_color( + hws->funcs.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dcn10_get_surface_visual_confirm_color( + hws->funcs.get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else { color_space_to_black_color( @@ -2276,11 +2211,12 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); } -void update_dchubp_dpp( +static void dcn10_update_dchubp_dpp( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -2304,8 +2240,7 @@ void update_dchubp_dpp( dc->res_pool->dccg->funcs->update_dpp_dto( dc->res_pool->dccg, dpp->inst, - pipe_ctx->plane_res.bw.dppclk_khz, - false); + pipe_ctx->plane_res.bw.dppclk_khz); else dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? 
dc->clk_mgr->clks.dispclk_khz / 2 : @@ -2335,12 +2270,12 @@ void update_dchubp_dpp( if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.bpp_change) - update_dpp(dpp, plane_state); + dcn10_update_dpp(dpp, plane_state); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || plane_state->update_flags.bits.global_alpha_change) - dc->hwss.update_mpcc(dc, pipe_ctx); + hws->funcs.update_mpcc(dc, pipe_ctx); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || @@ -2356,7 +2291,8 @@ void update_dchubp_dpp( hubp->funcs->mem_program_viewport( hubp, &pipe_ctx->plane_res.scl_data.viewport, - &pipe_ctx->plane_res.scl_data.viewport_c); + &pipe_ctx->plane_res.scl_data.viewport_c, + plane_state->rotation); } if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { @@ -2400,13 +2336,13 @@ void update_dchubp_dpp( hubp->power_gated = false; - dc->hwss.update_plane_addr(dc, pipe_ctx); + hws->funcs.update_plane_addr(dc, pipe_ctx); if (is_pipe_tree_visible(pipe_ctx)) hubp->funcs->set_blank(hubp, false); } -static void dcn10_blank_pixel_data( +void dcn10_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) @@ -2449,10 +2385,9 @@ static void dcn10_blank_pixel_data( } } -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx) { - struct fixed31_32 multiplier = dc_fixpt_from_fraction( - pipe_ctx->plane_state->sdr_white_level, 80); + struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; uint32_t hw_mult = 0x1f000; // 1.0 default multiplier struct custom_float_format fmt; @@ -2460,7 +2395,8 @@ void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) fmt.mantissa_bits = 12; fmt.sign = true; - if (pipe_ctx->plane_state->sdr_white_level > 80) + + if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 convert_to_custom_float_format(multiplier, &fmt, &hw_mult); pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier( @@ -2472,17 +2408,19 @@ void dcn10_program_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; + if (pipe_ctx->plane_state->update_flags.bits.full_update) dcn10_enable_plane(dc, pipe_ctx, context); - update_dchubp_dpp(dc, pipe_ctx, context); + dcn10_update_dchubp_dpp(dc, pipe_ctx, context); - set_hdr_multiplier(pipe_ctx); + hws->funcs.set_hdr_multiplier(pipe_ctx); if (pipe_ctx->plane_state->update_flags.bits.full_update || pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); /* dcn10_translate_regamma_to_hw_format takes 750us to finish * only do gamma programming for full update. 
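Note on the set_hdr_multiplier() rework above: the multiplier is no longer derived in place from sdr_white_level but consumed as a precomputed fixed31_32 in plane_state->hdr_mult, where 0 means "keep the 1.0 (0x1f000) hardware default". A sketch of the equivalent caller-side setup, assuming the caller still derives the value from the 80-nit SDR reference the deleted code used; the helper name is hypothetical:

	/* Hypothetical caller-side setup mirroring the deleted derivation. */
	static void set_plane_hdr_mult(struct dc_plane_state *plane_state,
				       uint32_t sdr_white_level)
	{
		if (sdr_white_level > 80)
			/* e.g. 160 nits -> 160/80 = 2.0 in fixed31_32 */
			plane_state->hdr_mult =
				dc_fixpt_from_fraction(sdr_white_level, 80);
		else
			/* 0 tells dcn10_set_hdr_multiplier() to keep 1.0 */
			plane_state->hdr_mult = dc_fixpt_from_int(0);
	}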
@@ -2491,14 +2429,16 @@ void dcn10_program_pipe( * doing heavy calculation and programming */ if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); } -static void program_all_pipe_in_tree( +static void dcn10_program_all_pipe_in_tree( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; + if (pipe_ctx->top_pipe == NULL) { bool blank = !is_pipe_tree_visible(pipe_ctx); @@ -2512,18 +2452,20 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); + hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) - dcn10_program_pipe(dc, pipe_ctx, context); + hws->funcs.program_pipe(dc, pipe_ctx, context); if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) - program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); + dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); } -struct pipe_ctx *find_top_pipe_for_stream( +static struct pipe_ctx *dcn10_find_top_pipe_for_stream( struct dc *dc, struct dc_state *context, const struct dc_stream_state *stream) @@ -2547,19 +2489,20 @@ struct pipe_ctx *find_top_pipe_for_stream( return NULL; } -static void dcn10_apply_ctx_for_surface( +void dcn10_apply_ctx_for_surface( struct dc *dc, const struct dc_stream_state *stream, int num_planes, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; int i; struct timing_generator *tg; uint32_t underflow_check_delay_us; bool removed_pipe[4] = { false }; bool interdependent_update = false; struct pipe_ctx *top_pipe_to_program = - find_top_pipe_for_stream(dc, context, stream); + dcn10_find_top_pipe_for_stream(dc, context, stream); DC_LOGGER_INIT(dc->ctx->logger); if (!top_pipe_to_program) @@ -2572,23 +2515,23 @@ static void dcn10_apply_ctx_for_surface( underflow_check_delay_us = dc->debug.underflow_assert_delay_us; - if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) - ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); + if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); if (interdependent_update) - lock_all_pipes(dc, context, true); + dcn10_lock_all_pipes(dc, context, true); else dcn10_pipe_control_lock(dc, top_pipe_to_program, true); if (underflow_check_delay_us != 0xFFFFFFFF) udelay(underflow_check_delay_us); - if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur) - ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program)); + if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) + ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); if (num_planes == 0) { /* OTG blank before remove all front end */ - dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); + hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true); } /* Disconnect unused mpcc */ @@ -2614,7 +2557,7 @@ static void dcn10_apply_ctx_for_surface( old_pipe_ctx->plane_state && old_pipe_ctx->stream_res.tg == tg) { - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx); removed_pipe[i] = true; DC_LOG_DC("Reset mpcc for pipe %d\n", 
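The locking rework in dcn10_apply_ctx_for_surface() above reads more easily as a condensed flow; the outline below only restates what this hunk and its continuation show, with the now-static dcn10_lock_all_pipes() taking the interdependent-update path:

	/* dcn10_apply_ctx_for_surface(), condensed */
	if (interdependent_update)
		dcn10_lock_all_pipes(dc, context, true);	/* every enabled top-pipe TG */
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);

	if (num_planes == 0)	/* OTG blank before removing all front ends */
		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);

	/* disconnect MPCCs whose pipes lost their plane_state, then
	 * program the remaining pipe tree and any writeback pipes */

	if (interdependent_update)
		dcn10_lock_all_pipes(dc, context, false);
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);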
@@ -2623,13 +2566,11 @@ static void dcn10_apply_ctx_for_surface( } if (num_planes > 0) - program_all_pipe_in_tree(dc, top_pipe_to_program, context); + dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); -#endif + if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree)) + hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context); if (interdependent_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2645,7 +2586,7 @@ static void dcn10_apply_ctx_for_surface( } if (interdependent_update) - lock_all_pipes(dc, context, false); + dcn10_lock_all_pipes(dc, context, false); else dcn10_pipe_control_lock(dc, top_pipe_to_program, false); @@ -2682,14 +2623,15 @@ static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *contex } } -static void dcn10_prepare_bandwidth( +void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2711,17 +2653,18 @@ static void dcn10_prepare_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } -static void dcn10_optimize_bandwidth( +void dcn10_optimize_bandwidth( struct dc *dc, struct dc_state *context) { + struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) @@ -2743,10 +2686,10 @@ static void dcn10_optimize_bandwidth( dcn_bw_notify_pplib_of_wm_ranges(dc); if (dc->debug.sanity_checks) - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } -static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, unsigned int vmin, unsigned int vmax, unsigned int vmid, unsigned int vmid_frame_number) { @@ -2774,7 +2717,7 @@ static void dcn10_set_drr(struct pipe_ctx **pipe_ctx, } } -static void dcn10_get_position(struct pipe_ctx **pipe_ctx, +void dcn10_get_position(struct pipe_ctx **pipe_ctx, int num_pipes, struct crtc_position *position) { @@ -2786,7 +2729,7 @@ static void dcn10_get_position(struct pipe_ctx **pipe_ctx, pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position); } -static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_events *events) { unsigned int i; @@ -2841,7 +2784,7 @@ static void dcn10_config_stereo_parameters( return; } -static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) { struct crtc_stereo_flags flags = { 0 }; struct dc_stream_state *stream = pipe_ctx->stream; @@ -2880,15 +2823,16 @@ static 
struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_in return NULL; } -static void dcn10_wait_for_mpcc_disconnect( +void dcn10_wait_for_mpcc_disconnect( struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; int mpcc_inst; if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } if (!pipe_ctx->stream_res.opp) @@ -2905,12 +2849,12 @@ static void dcn10_wait_for_mpcc_disconnect( } if (dc->debug.sanity_checks) { - dcn10_verify_allow_pstate_change_high(dc); + hws->funcs.verify_allow_pstate_change_high(dc); } } -static bool dcn10_dummy_display_power_gating( +bool dcn10_dummy_display_power_gating( struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, @@ -2919,7 +2863,7 @@ return true; } -static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) { struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct timing_generator *tg = pipe_ctx->stream_res.tg; @@ -2943,7 +2887,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) } } -static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) { struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub; @@ -2951,7 +2895,7 @@ static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh hubbub->funcs->update_dchub(hubbub, dh_data); } -static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -2965,15 +2909,30 @@ .rotation = pipe_ctx->plane_state->rotation, .mirror = pipe_ctx->plane_state->horizontal_mirror }; - uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x; - uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y; - uint32_t x_offset = min(x_plane, pos_cpy.x); - uint32_t y_offset = min(y_plane, pos_cpy.y); - pos_cpy.x -= x_offset; - pos_cpy.y -= y_offset; - pos_cpy.x_hotspot += (x_plane - x_offset); - pos_cpy.y_hotspot += (y_plane - y_offset); + int x_plane = pipe_ctx->plane_state->dst_rect.x; + int y_plane = pipe_ctx->plane_state->dst_rect.y; + int x_pos = pos_cpy.x; + int y_pos = pos_cpy.y; + + // translate cursor from stream space to plane space + x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width / + pipe_ctx->plane_state->dst_rect.width; + y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height / + pipe_ctx->plane_state->dst_rect.height; + + if (x_pos < 0) { + pos_cpy.x_hotspot -= x_pos; + x_pos = 0; + } + + if (y_pos < 0) { + pos_cpy.y_hotspot -= y_pos; + y_pos = 0; + } + + pos_cpy.x = (uint32_t)x_pos; + pos_cpy.y = (uint32_t)y_pos; if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) @@ -3017,7 +2976,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height); } -static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) { struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; @@ -3027,7 +2986,7 @@ static void 
dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, attributes); } -static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) { uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level; struct fixed31_32 multiplier; @@ -3054,12 +3013,12 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &opt_attr); } -/** -* apply_front_porch_workaround TODO FPGA still need? -* -* This is a workaround for a bug that has existed since R5xx and has not been -* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. -*/ +/* + * apply_front_porch_workaround TODO FPGA still need? + * + * This is a workaround for a bug that has existed since R5xx and has not been + * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. + */ static void apply_front_porch_workaround( struct dc_crtc_timing *timing) { @@ -3072,7 +3031,7 @@ static void apply_front_porch_workaround( } } -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) { const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; struct dc_crtc_timing patched_crtc_timing; @@ -3101,34 +3060,8 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } -void lock_all_pipes(struct dc *dc, - struct dc_state *context, - bool lock) -{ - struct pipe_ctx *pipe_ctx; - struct timing_generator *tg; - int i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - pipe_ctx = &context->res_ctx.pipe_ctx[i]; - tg = pipe_ctx->stream_res.tg; - /* - * Only lock the top pipe's tg to prevent redundant - * (un)locking. Also skip if pipe is disabled. 
- */ - if (pipe_ctx->top_pipe || - !pipe_ctx->stream || !pipe_ctx->plane_state || - !tg->funcs->is_tg_enabled(tg)) - continue; - - if (lock) - tg->funcs->lock(tg); - else - tg->funcs->unlock(tg); - } -} - -static void calc_vupdate_position( +static void dcn10_calc_vupdate_position( + struct dc *dc, struct pipe_ctx *pipe_ctx, uint32_t *start_line, uint32_t *end_line) @@ -3136,7 +3069,7 @@ static void calc_vupdate_position( const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; int vline_int_offset_from_vupdate = pipe_ctx->stream->periodic_interrupt0.lines_offset; - int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx); + int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); int start_position; if (vline_int_offset_from_vupdate > 0) @@ -3157,7 +3090,8 @@ static void calc_vupdate_position( *end_line = 2; } -static void cal_vline_position( +static void dcn10_cal_vline_position( + struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline, uint32_t *start_line, @@ -3172,7 +3106,8 @@ static void cal_vline_position( switch (ref_point) { case START_V_UPDATE: - calc_vupdate_position( + dcn10_calc_vupdate_position( + dc, pipe_ctx, start_line, end_line); @@ -3186,7 +3121,8 @@ static void cal_vline_position( } } -static void dcn10_setup_periodic_interrupt( +void dcn10_setup_periodic_interrupt( + struct dc *dc, struct pipe_ctx *pipe_ctx, enum vline_select vline) { @@ -3196,7 +3132,7 @@ static void dcn10_setup_periodic_interrupt( uint32_t start_line = 0; uint32_t end_line = 0; - cal_vline_position(pipe_ctx, vline, &start_line, &end_line); + dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line); tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); @@ -3207,10 +3143,10 @@ static void dcn10_setup_periodic_interrupt( } } -static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; - int start_line = get_vupdate_offset_from_vsync(pipe_ctx); + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (start_line < 0) { ASSERT(0); @@ -3221,12 +3157,13 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; /* only 3 items below are used by unblank */ params.timing = pipe_ctx->stream->timing; @@ -3240,11 +3177,11 @@ static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } -static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, const uint8_t *custom_sdp_message, unsigned int sdp_message_size) { @@ -3255,7 +3192,7 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, sdp_message_size); } } -static enum dc_status dcn10_set_clock(struct dc *dc, +enum dc_status dcn10_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) @@ -3295,7 +3232,7 @@ 
static enum dc_status dcn10_set_clock(struct dc *dc, } -static void dcn10_get_clock(struct dc *dc, +void dcn10_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) { @@ -3305,77 +3242,3 @@ static void dcn10_get_clock(struct dc *dc, dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg); } - -static const struct hw_sequencer_funcs dcn10_funcs = { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn10_init_hw, - .init_pipes = dcn10_init_pipes, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, - .update_plane_addr = dcn10_update_plane_addr, - .plane_atomic_disconnect = hwss1_plane_atomic_disconnect, - .update_dchub = dcn10_update_dchub, - .update_mpcc = dcn10_update_mpcc, - .update_pending_status = dcn10_update_pending_status, - .set_input_transfer_func = dcn10_set_input_transfer_func, - .set_output_transfer_func = dcn10_set_output_transfer_func, - .program_output_csc = dcn10_program_output_csc, - .power_down = dce110_power_down, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dce110_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn10_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .disable_plane = dcn10_disable_plane, - .blank_pixel_data = dcn10_blank_pixel_data, - .pipe_control_lock = dcn10_pipe_control_lock, - .prepare_bandwidth = dcn10_prepare_bandwidth, - .optimize_bandwidth = dcn10_optimize_bandwidth, - .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, - .enable_stream_timing = dcn10_enable_stream_timing, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = NULL, - .disable_vga = dcn10_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn10_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn10_enable_power_gating_plane, - .dpp_pg_control = dcn10_dpp_pg_control, - .hubp_pg_control = dcn10_hubp_pg_control, - .dsc_pg_control = NULL, -}; - - -void dcn10_hw_sequencer_construct(struct dc *dc) 
-{ - dc->hwss = dcn10_funcs; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index d3616b1948cc..55b8f3b2fc4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -27,68 +27,160 @@ #define __DC_HWSS_DCN10_H__ #include "core_types.h" +#include "hw_sequencer_private.h" struct dc; void dcn10_hw_sequencer_construct(struct dc *dc); -extern void fill_display_configs( - const struct dc_state *context, - struct dm_pp_display_configuration *pp_display_cfg); - -bool is_rgb_cspace(enum dc_color_space output_color_space); - -void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void dcn10_verify_allow_pstate_change_high(struct dc *dc); +int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +enum dc_status dcn10_enable_stream_timing( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); +void dcn10_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); +void dcn10_prepare_bandwidth( + struct dc *dc, + struct dc_state *context); +void dcn10_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn10_blank_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); +void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn10_program_output_csc(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); +bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); +bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_apply_ctx_for_surface( + struct dc *dc, + const struct dc_stream_state *stream, + int num_planes, + struct dc_state *context); +void dcn10_hubp_pg_control( + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); +void dcn10_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); +void dcn10_enable_power_gating_plane( + struct dce_hwseq *hws, + bool enable); +void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_vga( + struct dce_hwseq *hws); void dcn10_program_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); - -void dcn10_get_hw_state( +void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx); +void dcn10_init_hw(struct dc *dc); +void dcn10_init_pipes(struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_ctx_to_hw( + struct dc *dc, + struct dc_state *context); +void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); +void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); +void dce110_power_down(struct dc *dc); +void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); +void dcn10_enable_timing_synchronization( + struct dc *dc, + int group_index, + int 
group_size, + struct pipe_ctx *grouped_pipes[]); +void dcn10_enable_per_frame_crtc_position_reset( + struct dc *dc, + int group_size, + struct pipe_ctx *grouped_pipes[]); +void dce110_update_info_frame(struct pipe_ctx *pipe_ctx); +void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, + const uint8_t *custom_sdp_message, + unsigned int sdp_message_size); +void dce110_blank_stream(struct pipe_ctx *pipe_ctx); +void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx); +void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx); +bool dcn10_dummy_display_power_gating( struct dc *dc, - char *pBuf, unsigned int bufSize, + uint8_t controller_id, + struct dc_bios *dcb, + enum pipe_gating_control power_gating); +void dcn10_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, unsigned int vmin, unsigned int vmax, + unsigned int vmid, unsigned int vmid_frame_number); +void dcn10_get_position(struct pipe_ctx **pipe_ctx, + int num_pipes, + struct crtc_position *position); +void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx, + int num_pipes, const struct dc_static_screen_events *events); +void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc); +void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); +void dcn10_get_hw_state(struct dc *dc, + char *pBuf, + unsigned int bufSize, unsigned int mask); - void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); - -bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); - -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); - -void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); - +void dcn10_wait_for_mpcc_disconnect( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx); +void dce110_edp_backlight_control( + struct dc_link *link, + bool enable); +void dce110_edp_power_control( + struct dc_link *link, + bool power_up); +void dce110_edp_wait_for_hpd_ready( + struct dc_link *link, + bool power_up); +void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); +void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); +void dcn10_setup_periodic_interrupt( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); +enum dc_status dcn10_set_clock(struct dc *dc, + enum dc_clock_type clock_type, + uint32_t clk_khz, + uint32_t stepping); +void dcn10_get_clock(struct dc *dc, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg); +bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_bios_golden_init(struct dc *dc); +void dcn10_plane_atomic_power_down(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp); void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color); - void dcn10_get_hdr_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); - -bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx); - -void update_dchubp_dpp( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context); - -struct pipe_ctx *find_top_pipe_for_stream( - struct dc *dc, - struct dc_state *context, - const struct dc_stream_state *stream); - -int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); - -void dcn10_build_prescale_params(struct dc_bias_and_scale *bias_and_scale, - const 
struct dc_plane_state *plane_state); -void lock_all_pipes(struct dc *dc, - struct dc_state *context, - bool lock); +void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); +void dcn10_verify_allow_pstate_change_high(struct dc *dc); #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h new file mode 100644 index 000000000000..596f95c22e85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN10_DEBUG_H__ +#define __DC_HWSS_DCN10_DEBUG_H__ + +#include "core_types.h" + +struct dc; + +void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); + +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); + +void dcn10_get_hw_state(struct dc *dc, + char *pBuf, + unsigned int bufSize, + unsigned int mask); + +#endif /* __DC_HWSS_DCN10_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c new file mode 100644 index 000000000000..e7e5352ec424 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -0,0 +1,111 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "hw_sequencer_private.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10_hw_sequencer.h" + +static const struct hw_sequencer_funcs dcn10_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, + .update_plane_addr = dcn10_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn10_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dce110_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn10_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn10_disable_plane, + .pipe_control_lock = dcn10_pipe_control_lock, + .prepare_bandwidth = dcn10_prepare_bandwidth, + .optimize_bandwidth = dcn10_optimize_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn10_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn10_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .program_pipe = dcn10_program_pipe, + .update_mpcc = dcn10_update_mpcc, + .set_input_transfer_func = dcn10_set_input_transfer_func, + .set_output_transfer_func = dcn10_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn10_blank_pixel_data, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn10_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = NULL, + .disable_vga = dcn10_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn10_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn10_enable_power_gating_plane, + .dpp_pg_control = dcn10_dpp_pg_control, + .hubp_pg_control = dcn10_hubp_pg_control, + .dsc_pg_control = NULL, + .get_surface_visual_confirm_color 
= dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, +}; + +void dcn10_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn10_funcs; + dc->hwseq->funcs = dcn10_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h new file mode 100644 index 000000000000..8c6fd7b844a4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN10_INIT_H__ +#define __DC_DCN10_INIT_H__ + +struct dc; + +void dcn10_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN10_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c index 0fb9e440cb9d..f05371c1fc36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c @@ -53,11 +53,9 @@ static const struct ipp_funcs dcn10_ipp_funcs = { .ipp_destroy = dcn10_ipp_destroy }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) static const struct ipp_funcs dcn20_ipp_funcs = { .ipp_destroy = dcn10_ipp_destroy }; -#endif void dcn10_ipp_construct( struct dcn10_ipp *ippn10, @@ -76,7 +74,6 @@ void dcn10_ipp_construct( ippn10->ipp_mask = ipp_mask; } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn20_ipp_construct( struct dcn10_ipp *ippn10, struct dc_context *ctx, @@ -93,4 +90,3 @@ void dcn20_ipp_construct( ippn10->ipp_shift = ipp_shift; ippn10->ipp_mask = ipp_mask; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h index cfa24459242b..f0e0d07b0311 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h @@ -49,7 +49,6 @@ SRI(CURSOR_HOT_SPOT, CURSOR, id), \ SRI(CURSOR_DST_OFFSET, CURSOR, id) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define IPP_REG_LIST_DCN20(id) \ IPP_REG_LIST_DCN(id), \ SRI(CURSOR_SETTINGS, HUBPREQ, id), \ @@ -60,7 +59,6 @@ SRI(CURSOR_POSITION, CURSOR0_, id), \ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \ SRI(CURSOR_DST_OFFSET, CURSOR0_, id) -#endif #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 #define 
CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L @@ -105,7 +103,6 @@ IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define IPP_MASK_SH_LIST_DCN20(mask_sh) \ IPP_MASK_SH_LIST_DCN(mask_sh), \ IPP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \ @@ -124,7 +121,6 @@ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh) -#endif #define IPP_DCN10_REG_FIELD_LIST(type) \ type CNVC_SURFACE_PIXEL_FORMAT; \ @@ -196,13 +192,11 @@ void dcn10_ipp_construct(struct dcn10_ipp *ippn10, const struct dcn10_ipp_shift *ipp_shift, const struct dcn10_ipp_mask *ipp_mask); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void dcn20_ipp_construct(struct dcn10_ipp *ippn10, struct dc_context *ctx, int inst, const struct dcn10_ipp_registers *regs, const struct dcn10_ipp_shift *ipp_shift, const struct dcn10_ipp_mask *ipp_mask); -#endif #endif /* _DCN10_IPP_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 8bf5f0f2301d..7493a630f4dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -72,9 +72,7 @@ struct dcn10_link_enc_aux_registers { uint32_t AUX_CONTROL; uint32_t AUX_DPHY_RX_CONTROL0; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t AUX_DPHY_TX_CONTROL; -#endif }; struct dcn10_link_enc_hpd_registers { @@ -106,13 +104,26 @@ struct dcn10_link_enc_registers { uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; uint32_t DP_SEC_CNTL1; uint32_t TMDS_CTL_BITS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* DCCG */ uint32_t CLOCK_ENABLE; /* DIG */ uint32_t DIG_LANE_ENABLE; /* UNIPHY */ uint32_t CHANNEL_XBAR_CNTL; + /* DPCS */ + uint32_t RDPCSTX_PHY_CNTL3; + uint32_t RDPCSTX_PHY_CNTL4; + uint32_t RDPCSTX_PHY_CNTL5; + uint32_t RDPCSTX_PHY_CNTL6; + uint32_t RDPCSTX_PHY_CNTL7; + uint32_t RDPCSTX_PHY_CNTL8; + uint32_t RDPCSTX_PHY_CNTL9; + uint32_t RDPCSTX_PHY_CNTL10; + uint32_t RDPCSTX_PHY_CNTL11; + uint32_t RDPCSTX_PHY_CNTL12; + uint32_t RDPCSTX_PHY_CNTL13; + uint32_t RDPCSTX_PHY_CNTL14; + uint32_t RDPCSTX_PHY_CNTL15; /* indirect registers */ uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3; @@ -122,7 +133,6 @@ struct dcn10_link_enc_registers { uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3; uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3; -#endif }; #define LE_SF(reg_name, field_name, post_fix)\ @@ -228,7 +238,6 @@ struct dcn10_link_enc_registers { type AUX_LS_READ_EN;\ type AUX_RX_RECEIVE_WINDOW -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DCN20_LINK_ENCODER_DPCS_REG_FIELD_LIST(type) \ type RDPCS_PHY_DP_TX0_DATA_EN;\ @@ -250,6 +259,10 @@ struct dcn10_link_enc_registers { type RDPCS_EXT_REFCLK_EN;\ type RDPCS_TX_FIFO_EN;\ type UNIPHY_LINK_ENABLE;\ + type UNIPHY_CHANNEL0_XBAR_SOURCE;\ + type UNIPHY_CHANNEL1_XBAR_SOURCE;\ + type UNIPHY_CHANNEL2_XBAR_SOURCE;\ + type UNIPHY_CHANNEL3_XBAR_SOURCE;\ type UNIPHY_CHANNEL0_INVERT;\ type UNIPHY_CHANNEL1_INVERT;\ type UNIPHY_CHANNEL2_INVERT;\ @@ -337,16 +350,46 @@ struct dcn10_link_enc_registers { type RDPCS_TX_FIFO_ERROR_MASK;\ type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\ type RDPCS_DPALT_4LANE_TOGGLE_MASK;\ + type RDPCS_PHY_DPALT_DP4;\ type RDPCS_PHY_DPALT_DISABLE;\ type RDPCS_PHY_DPALT_DISABLE_ACK;\ type 
RDPCS_PHY_DP_MPLLB_V2I;\ type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\ + type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\ + type RDPCS_PHY_RX_VREF_CTRL;\ type RDPCS_PHY_DP_MPLLB_CP_INT;\ type RDPCS_PHY_DP_MPLLB_CP_PROP;\ type RDPCS_PHY_RX_REF_LD_VAL;\ type RDPCS_PHY_RX_VCO_LD_VAL;\ type DPCSTX_DEBUG_CONFIG; \ - type RDPCSTX_DEBUG_CONFIG + type RDPCSTX_DEBUG_CONFIG; \ + type RDPCS_PHY_DP_TX0_EQ_MAIN;\ + type RDPCS_PHY_DP_TX0_EQ_PRE;\ + type RDPCS_PHY_DP_TX0_EQ_POST;\ + type RDPCS_PHY_DP_TX1_EQ_MAIN;\ + type RDPCS_PHY_DP_TX1_EQ_PRE;\ + type RDPCS_PHY_DP_TX1_EQ_POST;\ + type RDPCS_PHY_DP_TX2_EQ_MAIN;\ + type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\ + type RDPCS_PHY_DP_TX2_EQ_PRE;\ + type RDPCS_PHY_DP_TX2_EQ_POST;\ + type RDPCS_PHY_DP_TX3_EQ_MAIN;\ + type RDPCS_PHY_DCO_RANGE;\ + type RDPCS_PHY_DCO_FINETUNE;\ + type RDPCS_PHY_DP_TX3_EQ_PRE;\ + type RDPCS_PHY_DP_TX3_EQ_POST;\ + type RDPCS_PHY_SUP_PRE_HP;\ + type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\ + type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\ + type UNIPHYA_SOFT_RESET;\ + type UNIPHYB_SOFT_RESET;\ + type UNIPHYC_SOFT_RESET;\ + type UNIPHYD_SOFT_RESET;\ + type UNIPHYE_SOFT_RESET;\ + type UNIPHYF_SOFT_RESET #define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_LANE0EN;\ @@ -375,20 +418,15 @@ struct dcn10_link_enc_registers { type AUX_TX_PRECHARGE_SYMBOLS; \ type AUX_MODE_DET_CHECK_DELAY;\ type DPCS_DBG_CBUS_DIS -#endif struct dcn10_link_enc_shift { DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); -#endif }; struct dcn10_link_enc_mask { DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); -#endif }; struct dcn10_link_encoder { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 8b2f29f6dabd..04f863499cfb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -42,20 +42,27 @@ void mpc1_set_bg_color(struct mpc *mpc, int mpcc_id) { struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id); + uint32_t bg_r_cr, bg_g_y, bg_b_cb; + + /* find bottommost mpcc. */ + while (bottommost_mpcc->mpcc_bot) { + bottommost_mpcc = bottommost_mpcc->mpcc_bot; + } /* mpc color is 12 bit. tg_color is 10 bit */ /* todo: might want to use 16 bit to represent color and have each * hw block translate to correct color depth. 
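
The mpc1_set_bg_color change above resolves the bottommost MPCC of the blending chain before touching the background registers, since the background only shows through beneath the last blended layer, and the register writes just below widen the 10-bit tg_color values to the 12-bit MPCC registers with a left shift of two. A standalone sketch of the walk-and-widen logic, with hypothetical reduced structs in place of the driver's types:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, reduced stand-ins for the driver's types. */
struct tg_color { uint32_t color_r_cr, color_g_y, color_b_cb; }; /* 10 bit */
struct mpcc { int mpcc_id; struct mpcc *mpcc_bot; };

/* Walk the blend chain to the bottommost layer; that is the only MPCC
 * whose background color can ever become visible. */
static struct mpcc *find_bottommost(struct mpcc *m)
{
	while (m->mpcc_bot)
		m = m->mpcc_bot;
	return m;
}

int main(void)
{
	struct mpcc bot = { .mpcc_id = 2, .mpcc_bot = NULL };
	struct mpcc top = { .mpcc_id = 0, .mpcc_bot = &bot };
	struct tg_color bg = { .color_r_cr = 0x3ff, .color_g_y = 0, .color_b_cb = 0 };
	struct mpcc *target = find_bottommost(&top);

	/* MPC background registers are 12 bit; tg_color is 10 bit. */
	printf("program MPCC %d: R_CR=0x%03x\n", target->mpcc_id, bg.color_r_cr << 2);
	return 0;
}
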
*/ - uint32_t bg_r_cr = bg_color->color_r_cr << 2; - uint32_t bg_g_y = bg_color->color_g_y << 2; - uint32_t bg_b_cb = bg_color->color_b_cb << 2; + bg_r_cr = bg_color->color_r_cr << 2; + bg_g_y = bg_color->color_g_y << 2; + bg_b_cb = bg_color->color_b_cb << 2; - REG_SET(MPCC_BG_R_CR[mpcc_id], 0, + REG_SET(MPCC_BG_R_CR[bottommost_mpcc->mpcc_id], 0, MPCC_BG_R_CR, bg_r_cr); - REG_SET(MPCC_BG_G_Y[mpcc_id], 0, + REG_SET(MPCC_BG_G_Y[bottommost_mpcc->mpcc_id], 0, MPCC_BG_G_Y, bg_g_y); - REG_SET(MPCC_BG_B_CB[mpcc_id], 0, + REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0, MPCC_BG_B_CB, bg_b_cb); } @@ -457,12 +464,10 @@ static const struct mpc_funcs dcn10_mpc_funcs = { .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, .update_blending = mpc1_update_blending, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .set_denorm = NULL, .set_denorm_clamp = NULL, .set_output_csc = NULL, .set_output_gamma = NULL, -#endif }; void dcn10_mpc_construct(struct dcn10_mpc *mpc10, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index e9ebbbe256b4..d79718fde5a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding( REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 1, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); @@ -237,6 +240,9 @@ void opp1_set_dyn_expansion( FMT_DYNAMIC_EXP_EN, 0, FMT_DYNAMIC_EXP_MODE, 0); + if (opp->dyn_expansion == DYN_EXPANSION_DISABLE) + return; + /*00 - 10-bit -> 12-bit dynamic expansion*/ /*01 - 8-bit -> 12-bit dynamic expansion*/ if (signal == SIGNAL_TYPE_HDMI_TYPE_A || @@ -367,11 +373,9 @@ void opp1_program_oppbuf( */ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* Controls the number of padded pixels at the end of a segment */ if (REG(OPPBUF_CONTROL1)) REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -#endif } void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) @@ -398,9 +402,8 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, .opp_program_stereo = opp1_program_stereo, .opp_pipe_clock_control = opp1_pipe_clock_control, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) .opp_set_disp_pattern_generator = NULL, -#endif + .dpg_is_blanked = NULL, .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 0f10adea000c..2c0ecfa5a643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -116,6 +116,8 @@ type FMT_RAND_G_SEED; \ type FMT_RAND_B_SEED; \ type FMT_PIXEL_ENCODING; \ + type FMT_SUBSAMPLING_MODE; \ + type FMT_CBCR_BIT_REDUCTION_BYPASS; \ type FMT_CLAMP_DATA_EN; \ type FMT_CLAMP_COLOR_FORMAT; \ type FMT_DYNAMIC_EXP_EN; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index e74a07d03fde..cd7412dc42d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -457,11 +457,16 @@ static bool optc1_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); + REG_SEQ_START(); + /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + return true; } @@ -1230,59 +1235,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc) return ret; } -bool optc1_is_matching_timing(struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing) +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing) { - struct dc_crtc_timing hw_crtc_timing = {0}; struct dcn_otg_state s = {0}; - if (tg == NULL || otg_timing == NULL) + if (tg == NULL || hw_crtc_timing == NULL) return false; optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); - hw_crtc_timing.h_total = s.h_total + 1; - hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); - hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start; - hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start; - - hw_crtc_timing.v_total = s.v_total + 1; - hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); - hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start; - hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start; - - if (otg_timing->h_total != hw_crtc_timing.h_total) - return false; - - if (otg_timing->h_border_left != hw_crtc_timing.h_border_left) - return false; - - if (otg_timing->h_addressable != hw_crtc_timing.h_addressable) - return false; - - if (otg_timing->h_border_right != hw_crtc_timing.h_border_right) - return false; - - if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch) - return false; - - if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width) - return false; - - if (otg_timing->v_total != hw_crtc_timing.v_total) - return false; - - if (otg_timing->v_border_top != hw_crtc_timing.v_border_top) - return false; - - if (otg_timing->v_addressable != hw_crtc_timing.v_addressable) - return false; + hw_crtc_timing->h_total = s.h_total + 1; + hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); + hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start; + hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start; - if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) - return false; - - if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width) - return false; + hw_crtc_timing->v_total = s.v_total + 1; + hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); + hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start; + hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start; return true; } @@ -1486,7 +1457,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, - .is_matching_timing = optc1_is_matching_timing, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. 
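
In the hunk above, optc1_is_matching_timing, which read the OTG state and compared it against a caller-supplied timing, becomes optc1_get_hw_timing, which only derives and returns the hardware timing; the comparison policy moves to the caller. A sketch of rebuilding the old matching check on top of the getter, assuming a hypothetical trimmed-down crtc_timing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical subset of dc_crtc_timing; the real struct carries more fields. */
struct crtc_timing {
	uint32_t h_total, h_addressable, h_front_porch, h_sync_width;
	uint32_t v_total, v_addressable, v_front_porch, v_sync_width;
};

/* With a plain getter, "is the OTG already running this mode?" becomes
 * an ordinary field-by-field comparison owned by the caller. */
static bool timing_matches(const struct crtc_timing *want,
			   const struct crtc_timing *hw)
{
	return want->h_total == hw->h_total &&
	       want->h_addressable == hw->h_addressable &&
	       want->h_front_porch == hw->h_front_porch &&
	       want->h_sync_width == hw->h_sync_width &&
	       want->v_total == hw->v_total &&
	       want->v_addressable == hw->v_addressable &&
	       want->v_front_porch == hw->v_front_porch &&
	       want->v_sync_width == hw->v_sync_width;
}

int main(void)
{
	struct crtc_timing want = { 2200, 1920, 88, 44, 1125, 1080, 4, 5 };
	struct crtc_timing hw = want; /* pretend the getter filled this in */

	printf("seamless: %d\n", timing_matches(&want, &hw));
	return 0;
}
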
Not need for FPGA */ .wait_for_state = optc1_wait_for_state, @@ -1514,7 +1484,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .configure_crc = optc1_configure_crc, .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc1_program_manual_trigger, - .setup_manual_trigger = optc1_setup_manual_trigger + .setup_manual_trigger = optc1_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, }; void dcn10_timing_generator_init(struct optc *optc1) @@ -1531,7 +1502,6 @@ void dcn10_timing_generator_init(struct optc *optc1) optc1->min_v_sync_width = 1; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this: * * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as @@ -1544,15 +1514,12 @@ void dcn10_timing_generator_init(struct optc *optc1) * to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well. * */ -#endif bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !timing->dsc_cfg.ycbcr422_simple); -#endif return two_pix; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 83575599672e..3afeb1a30f21 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -165,13 +165,11 @@ struct dcn_optc_registers { uint32_t OTG_CRC0_WINDOWB_X_CONTROL; uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; uint32_t GSL_SOURCE_SELECT; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t DWB_SOURCE_SELECT; uint32_t OTG_DSC_START_POSITION; uint32_t OPTC_DATA_FORMAT_CONTROL; uint32_t OPTC_BYTES_PER_PIXEL; uint32_t OPTC_WIDTH_CONTROL; -#endif }; #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ @@ -456,7 +454,6 @@ struct dcn_optc_registers { type MANUAL_FLOW_CONTROL;\ type MANUAL_FLOW_CONTROL_SEL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define TG_REG_FIELD_LIST(type) \ TG_REG_FIELD_LIST_DCN1_0(type)\ @@ -479,12 +476,6 @@ struct dcn_optc_registers { type OPTC_DWB0_SOURCE_SELECT;\ type OPTC_DWB1_SOURCE_SELECT; -#else - -#define TG_REG_FIELD_LIST(type) \ - TG_REG_FIELD_LIST_DCN1_0(type) - -#endif struct dcn_optc_shift { @@ -542,14 +533,14 @@ struct dcn_otg_state { uint32_t h_total; uint32_t underflow_occurred_status; uint32_t otg_enabled; + uint32_t blank_enabled; }; void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); -bool optc1_is_matching_timing( - struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing); +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing); bool optc1_validate_timing( struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1599bb971111..3b71898e859e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -28,6 +28,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn10_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn10_resource.h" @@ -319,6 +321,14 @@ static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) 
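
The optc1_is_two_pixels_per_containter() change in the optc hunks above drops the DSC config guard; the rule it implements is that two pixels share one container (halving the rate the OTG must run at) for 4:2:0 always, and for DSC native, non-simple, 4:2:2. A small sketch of the derived container rate under those rules, with hypothetical inputs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum pixel_encoding { ENC_RGB, ENC_YCBCR422, ENC_YCBCR420 };

/* Hypothetical reduced timing; mirrors the fields the real check reads. */
struct timing {
	enum pixel_encoding enc;
	bool dsc_enabled;
	bool ycbcr422_simple; /* DSC "simple" 4:2:2 behaves like 4:4:4 */
	uint32_t pix_clk_khz;
};

/* 4:2:0 always packs two pixels per container; DSC native (non-simple)
 * 4:2:2 does as well, per the comment in the hunk above. */
static bool two_pixels_per_container(const struct timing *t)
{
	if (t->enc == ENC_YCBCR420)
		return true;
	return t->dsc_enabled && t->enc == ENC_YCBCR422 && !t->ycbcr422_simple;
}

int main(void)
{
	struct timing t = { ENC_YCBCR420, false, false, 594000 };
	uint32_t container_khz =
		t.pix_clk_khz / (two_pixels_per_container(&t) ? 2 : 1);

	printf("container rate: %u kHz\n", container_khz);
	return 0;
}
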
}; +static const struct dce110_aux_registers_shift aux_shift = { + DCN10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN10_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ @@ -471,6 +481,28 @@ static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ @@ -642,7 +674,10 @@ struct dce_aux *dcn10_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -751,14 +786,18 @@ struct link_encoder *dcn10_link_encoder_create( { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc10) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -882,7 +921,7 @@ static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) return pp_smu; } -static void destruct(struct dcn10_resource_pool *pool) +static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) { unsigned int i; @@ -1129,7 +1168,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); - destruct(dcn10_pool); + dcn10_resource_destruct(dcn10_pool); kfree(dcn10_pool); *pool = NULL; } @@ -1268,7 +1307,7 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } -static bool construct( +static bool dcn10_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn10_resource_pool *pool) @@ -1308,6 +1347,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + dc->caps.extended_aux_timeout_support = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. 
Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; @@ -1553,7 +1594,7 @@ static bool construct( fail: - destruct(pool); + dcn10_resource_destruct(pool); return false; } @@ -1568,7 +1609,7 @@ struct resource_pool *dcn10_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; kfree(pool); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9aa258f3550b..376c4264d295 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -247,6 +247,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { uint32_t h_active_start; @@ -312,10 +313,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). */ - if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (output_color_space == COLOR_SPACE_2020_YCBCR) || - (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || - (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) + if (use_vsc_sdp_for_colorimetry) misc1 = misc1 | 0x40; else misc1 = misc1 & ~0x40; @@ -1553,6 +1551,66 @@ unsigned int enc1_dig_source_otg( return tg_inst; } +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth) +{ + uint32_t hw_encoding = 0; + uint32_t hw_depth = 0; + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (enc == NULL || + encoding == NULL || + depth == NULL) + return false; + + REG_GET_2(DP_PIXEL_FORMAT, + DP_PIXEL_ENCODING, &hw_encoding, + DP_COMPONENT_DEPTH, &hw_depth); + + switch (hw_depth) { + case DP_COMPONENT_PIXEL_DEPTH_6BPC: + *depth = COLOR_DEPTH_666; + break; + case DP_COMPONENT_PIXEL_DEPTH_8BPC: + *depth = COLOR_DEPTH_888; + break; + case DP_COMPONENT_PIXEL_DEPTH_10BPC: + *depth = COLOR_DEPTH_101010; + break; + case DP_COMPONENT_PIXEL_DEPTH_12BPC: + *depth = COLOR_DEPTH_121212; + break; + case DP_COMPONENT_PIXEL_DEPTH_16BPC: + *depth = COLOR_DEPTH_161616; + break; + default: + *depth = COLOR_DEPTH_UNDEFINED; + break; + } + + switch (hw_encoding) { + case DP_PIXEL_ENCODING_TYPE_RGB444: + *encoding = PIXEL_ENCODING_RGB; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR422: + *encoding = PIXEL_ENCODING_YCBCR422; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR444: + case DP_PIXEL_ENCODING_TYPE_Y_ONLY: + *encoding = PIXEL_ENCODING_YCBCR444; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR420: + *encoding = PIXEL_ENCODING_YCBCR420; + break; + default: + *encoding = PIXEL_ENCODING_UNDEFINED; + break; + } + return true; +} + static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dp_set_stream_attribute = enc1_stream_encoder_dp_set_stream_attribute, @@ -1589,6 +1647,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dig_connect_to_otg = enc1_dig_connect_to_otg, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, }; void dcn10_stream_encoder_construct( diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a512cbea00d1..f9b9e221c698 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -163,14 +163,12 @@ struct dcn10_stream_enc_registers { uint32_t DP_MSA_TIMING_PARAM3; uint32_t DP_MSA_TIMING_PARAM4; uint32_t HDMI_DB_CONTROL; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t DP_DSC_CNTL; uint32_t DP_DSC_BYTES_PER_PIXEL; uint32_t DME_CONTROL; uint32_t DP_SEC_METADATA_TRANSMISSION; uint32_t HDMI_METADATA_PACKET_CONTROL; uint32_t DP_SEC_FRAMING4; -#endif uint32_t DIG_CLOCK_PATTERN; }; @@ -466,7 +464,6 @@ struct dcn10_stream_enc_registers { type DIG_SOURCE_SELECT;\ type DIG_CLOCK_PATTERN -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define SE_REG_FIELD_LIST_DCN2_0(type) \ type DP_DSC_MODE;\ type DP_DSC_SLICE_WIDTH;\ @@ -485,20 +482,15 @@ struct dcn10_stream_enc_registers { type DOLBY_VISION_EN;\ type DP_PIXEL_COMBINE;\ type DP_SST_SDP_SPLITTING -#endif struct dcn10_stream_encoder_shift { SE_REG_FIELD_LIST_DCN1_0(uint8_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SE_REG_FIELD_LIST_DCN2_0(uint8_t); -#endif }; struct dcn10_stream_encoder_mask { SE_REG_FIELD_LIST_DCN1_0(uint32_t); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) SE_REG_FIELD_LIST_DCN2_0(uint32_t); -#endif }; struct dcn10_stream_encoder { @@ -526,6 +518,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void enc1_stream_encoder_hdmi_set_stream_attribute( @@ -621,4 +614,9 @@ void get_audio_clock_info( void enc1_reset_hdmi_stream_attribute( struct stream_encoder *enc); +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index ddb8d5649e79..fd52862d6624 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -1,24 +1,28 @@ +# SPDX-License-Identifier: MIT # # Makefile for DCN. -DCN20 = dcn20_resource.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ +DCN20 = dcn20_resource.o dcn20_init.o dcn20_hwseq.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DCN20 += dcn20_dsc.o -endif -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse -CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). 
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4 +else CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 16476ed25536..1e1151356e60 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -44,16 +44,12 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg2_update_dpp_dto(struct dccg *dccg, - int dpp_inst, - int req_dppclk, - bool reduce_divider_only) +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; - int current_phase, current_modulo; ASSERT(req_dppclk <= ref_dppclk); /* need to clamp to 8 bits */ @@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg, if (req_dppclk > ref_dppclk) req_dppclk = ref_dppclk; } - - REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst], - DPPCLK0_DTO_PHASE, ¤t_phase, - DPPCLK0_DTO_MODULO, ¤t_modulo); - - if (reduce_divider_only) { - // requested phase/modulo greater than current - if (req_dppclk * current_modulo >= current_phase * ref_dppclk) { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, current_phase, - DPPCLK0_DTO_MODULO, current_modulo); - } - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } - + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, req_dppclk, + DPPCLK0_DTO_MODULO, ref_dppclk); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else { @@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg, void dccg2_init(struct dccg *dccg) { - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - - // Fallthrough intentional to program all available dpp_dto's - switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { - case 6: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); - /* Fall through */ - case 5: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); - /* Fall through */ - case 4: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); - /* Fall through */ - case 3: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); - /* Fall through */ - case 2: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); - /* Fall through */ - case 1: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); - break; - default: - ASSERT(false); - break; - } } static const struct dccg_funcs dccg2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 74a074a873cd..2205cb0204e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -97,7 +97,7 @@ struct dcn_dccg { const struct dccg_mask *dccg_mask; }; -void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only); +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); void dccg2_get_dccg_ref_freq(struct dccg *dccg, unsigned int xtalin_freq_inKhz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 2f5aade1e882..4d7e45892f08 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -376,13 
+376,6 @@ bool dpp2_get_optimal_number_of_taps( struct scaler_data *scl_data, const struct scaling_taps *in_taps) { - uint32_t pixel_width; - - if (scl_data->viewport.width > scl_data->recout.width) - pixel_width = scl_data->recout.width; - else - pixel_width = scl_data->viewport.width; - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ if (scl_data->viewport.width != scl_data->h_active && scl_data->viewport.height != scl_data->v_active && @@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .dpp_read_state = dpp20_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 290b2854bd2c..5b03b737b1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -30,16 +30,20 @@ #define TO_DCN20_DPP(dpp)\ container_of(dpp, struct dcn20_dpp, base) -#define TF_REG_LIST_DCN20(id) \ - TF_REG_LIST_DCN(id), \ +#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) + +#define TF_REG_LIST_DCN20_COMMON(id) \ SRI(CM_BLNDGAM_CONTROL, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ @@ -66,9 +70,6 @@ SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ @@ -147,7 +148,12 @@ SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ - SRI(CM_SHAPER_LUT_INDEX, CM, id), \ + SRI(CM_SHAPER_LUT_INDEX, CM, id) + +#define TF_REG_LIST_DCN20(id) \ + TF_REG_LIST_DCN(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ SRI(CURSOR_CONTROL, CURSOR0_, id), \ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ @@ -166,27 +172,41 @@ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ SRI(DSCL_MEM_PWR_CTRL, DSCL, id) -#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + +#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, 
CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ 
-261,18 +281,9 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -341,9 +352,6 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ @@ -356,7 +364,6 @@ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ @@ -521,9 +528,14 @@ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ + 
TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ @@ -560,6 +572,7 @@ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) + #define TF_REG_FIELD_LIST_DCN2_0(type) \ TF_REG_FIELD_LIST(type) \ type CM_BLNDGAM_LUT_DATA; \ @@ -593,6 +606,7 @@ type OBUF_MEM_PWR_FORCE;\ type LUT_MEM_PWR_FORCE + struct dcn2_dpp_shift { TF_REG_FIELD_LIST_DCN2_0(uint8_t); }; @@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - bool dpp2_construct(struct dcn20_dpp *dpp2, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 2d112c316424..05a3e7f97ef0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -149,6 +149,9 @@ void dpp2_set_degamma( case IPP_DEGAMMA_MODE_HW_xvYCC: REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; default: BREAK_TO_DEBUGGER(); break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 1b419407af94..0111545dac75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "reg_helper.h" #include "dcn20_dsc.h" #include "dsc/dscc_types.h" @@ -118,7 +117,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock dsc_enc_caps->color_formats.bits.RGB = 1; dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; - dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0; + dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; @@ -222,9 +221,18 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) { struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; - /* TODO Check if DSC alreay in use? 
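
The dsc2_enable/dsc2_disable hunks below replace this TODO with an explicit ownership check: read back DSC_CLOCK_EN and the forwarding config, and assert if a different OPP pipe already owns the DSC instance. A reduced sketch of that read-back-and-assert pattern, with a hypothetical register-state struct standing in for REG_GET:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical register state for one DSC instance. */
struct dsc_state {
	bool clock_en;   /* DSC_TOP_CONTROL.DSC_CLOCK_EN */
	bool forward_en; /* DSCRM_DSC_FORWARD_CONFIG.DSCRM_DSC_FORWARD_EN */
	int  opp_pipe;   /* DSCRM_DSC_OPP_PIPE_SOURCE */
};

static void dsc_enable(struct dsc_state *s, int opp_pipe)
{
	/* Catch double-enable: the block already feeding another OPP pipe
	 * is a driver sequencing bug, so make it loud instead of silent. */
	if ((s->clock_en || s->forward_en) && s->opp_pipe != opp_pipe) {
		fprintf(stderr, "DSC already enabled on opp pipe %d\n",
			s->opp_pipe);
		assert(0);
	}
	s->clock_en = true;
	s->forward_en = true;
	s->opp_pipe = opp_pipe;
}

int main(void)
{
	struct dsc_state s = { false, false, -1 };

	dsc_enable(&s, 2);
	printf("DSC feeding opp pipe %d\n", s.opp_pipe);
	return 0;
}
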
*/ - DC_LOG_DSC("enable DSC at opp pipe %d", opp_pipe); + DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } REG_UPDATE(DSC_TOP_CONTROL, DSC_CLOCK_EN, 1); @@ -238,8 +246,18 @@ static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) static void dsc2_disable(struct display_stream_compressor *dsc) { struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; - DC_LOG_DSC("disable DSC"); + DC_LOG_DSC("disable DSC %d", dsc->inst); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if (!dsc_clock_en || !dsc_fw_config) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, 0); @@ -715,4 +733,3 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 4e2fb38390a4..9855a7ed0387 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -21,7 +21,6 @@ * Authors: AMD * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef __DCN20_DSC_H__ #define __DCN20_DSC_H__ @@ -572,4 +571,3 @@ void dsc2_construct(struct dcn20_dsc *dsc, #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index cd8bc92ce3ba..880954ac0b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, struct scaling_taps num_taps) { uint32_t h_ratio_luma = 1; - uint32_t h_ratio_chroma = 1; uint32_t h_taps_luma = num_taps.h_taps; uint32_t h_taps_chroma = num_taps.h_taps_c; int32_t h_init_phase_luma = 0; @@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, h_ratio_luma = -1; else h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5; - h_ratio_chroma = h_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma); @@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, enum dwb_subsample_position subsample_position) { uint32_t v_ratio_luma = 1; - uint32_t v_ratio_chroma = 1; uint32_t v_taps_luma = num_taps.v_taps; uint32_t v_taps_chroma = num_taps.v_taps_c; int32_t v_init_phase_luma = 0; @@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, v_ratio_luma = -1; else v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5; - v_ratio_chroma = v_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index b83c022e2c6f..8b8438566101 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl } static void hubbub2_det_request_size( + unsigned int detile_buf_size, unsigned int height, unsigned int width, unsigned int bpe, bool *req128_horz_wc, bool *req128_vert_wc) { - unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ - unsigned int blk256_height = 0; unsigned int blk256_width = 0; unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; @@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, &segment_order_horz, &segment_order_vert)) return false; - hubbub2_det_request_size(input->surface_size.height, input->surface_size.width, + hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, bpe, &req128_horz_wc, &req128_vert_wc); if (!req128_horz_wc && !req128_vert_wc) { @@ -588,7 +588,7 @@ static void hubbub2_program_watermarks( DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); } static const struct hubbub_funcs hubbub2_funcs = { @@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = { .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub2_program_watermarks + .program_watermarks = hubbub2_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control }; void hubbub2_construct(struct dcn20_hubbub *hubbub, @@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 626117d3b4e9..501532dd523a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -81,6 +81,7 @@ struct dcn20_hubbub { unsigned int debug_test_index_pstate; struct dcn_watermark_set watermarks; struct dcn20_vmid vmid[16]; + unsigned int detile_buf_size; }; void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 69e2aae42394..84d7ac5dd206 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -30,6 +30,8 @@ #include "reg_helper.h" #include "basics/conversion.h" +#define DC_LOGGER_INIT(logger) + #define REG(reg)\ hubp2->hubp_regs->reg @@ -483,7 +485,6 @@ void hubp2_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 12); break; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 112); @@ -504,7 +505,6 @@ void hubp2_program_pixel_format( REG_UPDATE(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, 119); break; -#endif default: BREAK_TO_DEBUGGER(); break; @@ -1204,6 +1204,9 @@ void hubp2_read_state_common(struct hubp *hubp) HUBP_TTU_DISABLE, &s->ttu_disable, HUBP_UNDERFLOW_STATUS, &s->underflow_status); + REG_GET(HUBP_CLK_CNTL, + 
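The hubbub change above stops hard-coding the 164KB detile buffer inside det_request_size and instead passes each hubbub's own size in, set once in hubbub2_construct. A rough userspace model of the decision the function drives, assuming simplified 16x16 block dimensions and the heuristic that full 256-byte requests are only worthwhile if two swaths double-buffer in the DET; the exact driver formula differs:

#include <stdbool.h>
#include <stdio.h>

/* illustrative model: compute per-direction swath bytes and decide
 * whether to fall back from 256B to 128B requests */
static void det_request_size(unsigned int detile_buf_size,
			     unsigned int height, unsigned int width,
			     unsigned int bpe,
			     bool *req128_horz, bool *req128_vert)
{
	const unsigned int blk256_height = 16, blk256_width = 16;
	unsigned int swath_bytes_horz = width * blk256_height * bpe;
	unsigned int swath_bytes_vert = height * blk256_width * bpe;

	/* full requests only if two swaths double-buffer in the DET */
	*req128_horz = (2 * swath_bytes_horz > detile_buf_size);
	*req128_vert = (2 * swath_bytes_vert > detile_buf_size);
}

int main(void)
{
	bool h, v;

	/* DCN2.0 now passes its own DET size instead of a baked-in constant */
	det_request_size(164 * 1024, 1080, 1920, 4, &h, &v);
	printf("req128 horz=%d vert=%d\n", h, v);
	return 0;
}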
HUBP_CLOCK_ENABLE, &s->clock_en); + REG_GET(DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, &s->min_ttu_vblank); @@ -1243,6 +1246,314 @@ void hubp2_read_state(struct hubp *hubp) } +void hubp2_validate_dml_output(struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; + struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; + struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; + DC_LOGGER_INIT(ctx->logger); + DC_LOG_DEBUG("DML Validation | Running Validation"); + + /* Requestor Regs */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, + MPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + + if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) + DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); + if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); + if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); + if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); + if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + + if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); + if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) + 
DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); + if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); + if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); + if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); + if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); + if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); + if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + + if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); + if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); + if (rq_regs.rq_regs_c.meta_chunk_size != dml_rq_regs->rq_regs_c.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size); + if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size); + if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size); + if (rq_regs.rq_regs_c.mpte_group_size != dml_rq_regs->rq_regs_c.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.mpte_group_size, rq_regs.rq_regs_c.mpte_group_size); + if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:SWATH_HEIGHT_C - Expected: %u Actual: 
%u\n", + dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height); + if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear); + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end); + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start); + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal); + REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler); + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq); + + if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end); + if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n", + dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end); + if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start) + DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n", + dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start); + if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal) + DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal); + if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler); + if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler) + DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler); + if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq) + DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n", + dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); + REG_GET(VBLANK_PARAMETERS_2, + 
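All of these checks follow one shape: read back the field the driver actually programmed, compare it with the value DML computed, and log the register:field name with both numbers on mismatch. A minimal stand-alone version of that pattern (CHECK_FIELD is a hypothetical helper for this sketch, not a driver macro):

#include <stdio.h>

/* log expected-vs-actual on mismatch, mirroring the DML validation style */
#define CHECK_FIELD(name, expected, actual)                               \
	do {                                                              \
		if ((expected) != (actual))                               \
			printf("DML Validation | %s - Expected: %u "      \
			       "Actual: %u\n", (name),                    \
			       (unsigned)(expected), (unsigned)(actual)); \
	} while (0)

int main(void)
{
	unsigned int dml_chunk_size = 7; /* value DML computed */
	unsigned int hw_chunk_size = 5;  /* value read back from the register */

	CHECK_FIELD("DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE",
		    dml_chunk_size, hw_chunk_size);
	return 0;
}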
REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + + if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); + if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); + if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); + if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); + if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); + if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); + if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_c, dlg_attr.refcyc_per_line_delivery_c); + if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); + if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); + if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); + if (dlg_attr.dst_y_per_meta_row_nom_c != 
dml_dlg_attr->dst_y_per_meta_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); + if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); + if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); + if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); + if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); + if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + + if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); + if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); + REG_GET_3(DCN_CUR0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); + REG_GET(FLIP_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); + REG_GET(DCN_CUR0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); + REG_GET(DCN_CUR1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + + if (ttu_attr.refcyc_per_req_delivery_l != 
dml_ttu_attr->refcyc_per_req_delivery_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); + if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); + if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); + if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); + if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); + if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); + if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); + if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); + if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); + if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); + if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); + if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) + DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); + if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); + if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | 
DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); +} + static struct hubp_funcs dcn20_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -1266,6 +1577,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp1_init, + .validate_dml_output = hubp2_validate_dml_output, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index d5c8615af45e..8c04a3606a54 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -148,7 +148,6 @@ uint32_t VMID_SETTINGS_0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ @@ -157,7 +156,6 @@ uint32_t FLIP_PARAMETERS_6;\ uint32_t VBLANK_PARAMETERS_5;\ uint32_t VBLANK_PARAMETERS_6 -#endif #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \ @@ -184,7 +182,6 @@ type SURFACE_TRIPLE_BUFFER_ENABLE;\ type VMID -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\ type REFCYC_PER_VM_GROUP_FLIP;\ @@ -194,31 +191,18 @@ type REFCYC_PER_PTE_GROUP_FLIP_C; \ type REFCYC_PER_META_CHUNK_FLIP_C; \ type VM_GROUP_SIZE -#endif struct dcn_hubp2_registers { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_COMMON_VARIABLE_LIST; -#else - DCN2_HUBP_REG_COMMON_VARIABLE_LIST; -#endif }; struct dcn_hubp2_shift { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#else - DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); -#endif }; struct dcn_hubp2_mask { -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#else - DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); -#endif }; struct dcn20_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 1212da12c414..32878a65bdd7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -25,17 +25,15 @@ #include <linux/delay.h> #include "dm_services.h" +#include "basics/dc_common.h" #include "dm_helpers.h" #include "core_types.h" #include "resource.h" -#include "dcn20/dcn20_resource.h" -#include "dce110/dce110_hw_sequencer.h" -#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_resource.h" #include "dcn20_hwseq.h" #include "dce/dce_hwseq.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dcn20/dcn20_dsc.h" -#endif +#include "dcn20_dsc.h" +#include "dcn20_optc.h" #include "abm.h" #include "clk_mgr.h" #include "dmcu.h" @@ -45,10 +43,9 @@ #include "ipp.h" #include "mpc.h" #include "mcif_wb.h" +#include "dchubbub.h" #include "reg_helper.h" #include "dcn10/dcn10_cm_common.h" -#include "dcn10/dcn10_hubbub.h" -#include "dcn10/dcn10_optc.h" #include "dc_link_dp.h" #include "vm_helper.h" #include "dccg.h" @@ -64,7 +61,125 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn20_enable_power_gating_plane( +static int find_free_gsl_group(const struct dc *dc) +{ + if (dc->res_pool->gsl_groups.gsl_0 == 0) + return 1; + if (dc->res_pool->gsl_groups.gsl_1 == 0) + 
return 2; + if (dc->res_pool->gsl_groups.gsl_2 == 0) + return 3; + + return 0; +} + +/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) + * This is only used to lock pipes in pipe splitting case with immediate flip + * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, + * so we get tearing with freesync since we cannot flip multiple pipes + * atomically. + * We use GSL for this: + * - immediate flip: find first available GSL group if not already assigned + * program gsl with that group, set current OTG as master + * and always use 0x4 = AND of flip_ready from all pipes + * - vsync flip: disable GSL if used + * + * Groups in stream_res are stored as +1 from HW registers, i.e. + * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 + * Using a magic value like -1 would require tracking all inits/resets + */ +static void dcn20_setup_gsl_group_as_lock( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable) +{ + struct gsl_params gsl; + int group_idx; + + memset(&gsl, 0, sizeof(struct gsl_params)); + + if (enable) { + /* return if group already assigned since GSL was set up + * for vsync flip, we would unassign so it can't be "left over" + */ + if (pipe_ctx->stream_res.gsl_group > 0) + return; + + group_idx = find_free_gsl_group(dc); + ASSERT(group_idx != 0); + pipe_ctx->stream_res.gsl_group = group_idx; + + /* set gsl group reg field and mark resource used */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 1; + dc->res_pool->gsl_groups.gsl_0 = 1; + break; + case 2: + gsl.gsl1_en = 1; + dc->res_pool->gsl_groups.gsl_1 = 1; + break; + case 3: + gsl.gsl2_en = 1; + dc->res_pool->gsl_groups.gsl_2 = 1; + break; + default: + BREAK_TO_DEBUGGER(); + return; // invalid case + } + gsl.gsl_master_en = 1; + } else { + group_idx = pipe_ctx->stream_res.gsl_group; + if (group_idx == 0) + return; // if not in use, just return + + pipe_ctx->stream_res.gsl_group = 0; + + /* unset gsl group reg field and mark resource free */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 0; + dc->res_pool->gsl_groups.gsl_0 = 0; + break; + case 2: + gsl.gsl1_en = 0; + dc->res_pool->gsl_groups.gsl_1 = 0; + break; + case 3: + gsl.gsl2_en = 0; + dc->res_pool->gsl_groups.gsl_2 = 0; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + gsl.gsl_master_en = 0; + } + + /* at this point we want to program whether it's to enable or disable */ + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + pipe_ctx->stream_res.tg->funcs->set_gsl( + pipe_ctx->stream_res.tg, + &gsl); + + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); + } else + BREAK_TO_DEBUGGER(); +} + +void dcn20_set_flip_control_gsl( + struct pipe_ctx *pipe_ctx, + bool flip_immediate) +{ + if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) + pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( + pipe_ctx->plane_res.hubp, flip_immediate); + +} + +void dcn20_enable_power_gating_plane( struct dce_hwseq *hws, bool enable) { @@ -128,44 +243,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws) /* This value is dependent on the hardware pipeline delay so set once per SOC */ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c); } -void dcn20_display_init(struct dc *dc) -{ - struct dce_hwseq *hws = dc->hwseq; - - /* RBBMIF - * disable RBBMIF timeout detection for all clients - * Ensure RBBMIF does not drop register accesses due to the per-client timeout - */ - REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); - REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - - /* DCCG */ - dcn20_dccg_init(hws); - - REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0); - - /* DCHUB/MMHUBBUB - * set global timer refclk divider - * 100Mhz refclk -> 2 - * 27Mhz refclk -> 1 - * 48Mhz refclk -> 1 - */ - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(REFCLK_CNTL, 0); - - /* OPTC - * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc - */ - - /* AZ - * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser, - * if not, it should be programmed according to the ref clock - */ - REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64); - /* Enable controller clock gating */ - REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1); -} void dcn20_disable_vga( struct dce_hwseq *hws) @@ -178,15 +255,15 @@ void dcn20_disable_vga( REG_WRITE(D6VGA_CONTROL, 0); } -void dcn20_program_tripleBuffer( +void dcn20_program_triple_buffer( const struct dc *dc, struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer) + bool enable_triple_buffer) { if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( pipe_ctx->plane_res.hubp, - enableTripleBuffer); + enable_triple_buffer); } } @@ -195,6 +272,7 @@ void dcn20_init_blank( struct dc *dc, struct timing_generator *tg) { + struct dce_hwseq *hws = dc->hwseq; enum dc_color_space color_space; struct tg_color black_color = {0}; struct output_pixel_processor *opp = NULL; @@ -225,6 +303,7 @@ void dcn20_init_blank( opp->funcs->opp_set_disp_pattern_generator( opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, @@ -234,17 +313,17 @@ void dcn20_init_blank( bottom_opp->funcs->opp_set_disp_pattern_generator( bottom_opp, CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, otg_active_height); } - dcn20_hwss_wait_for_blank_complete(opp); + hws->funcs.wait_for_blank_complete(opp); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static void dcn20_dsc_pg_control( +void dcn20_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) @@ -320,9 +399,8 @@ static void dcn20_dsc_pg_control( if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } -#endif -static void dcn20_dpp_pg_control( +void dcn20_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) @@ 
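The GSL locking above amounts to simple bookkeeping: three hardware groups, a per-pipe group id stored with a +1 bias so 0 means unassigned, allocation on immediate flip and release on vsync flip. A userspace model of that bookkeeping, with illustrative names in place of the driver's structs:

#include <assert.h>
#include <stdio.h>

/* three GSL groups, one in-use flag each */
struct gsl_pool { int used[3]; };

static int find_free_gsl_group(const struct gsl_pool *p)
{
	for (int i = 0; i < 3; i++)
		if (!p->used[i])
			return i + 1; /* +1 bias, 0 = none free */
	return 0;
}

static void set_gsl_as_lock(struct gsl_pool *p, int *pipe_group, int enable)
{
	if (enable) {
		if (*pipe_group > 0)
			return; /* already assigned */
		*pipe_group = find_free_gsl_group(p);
		assert(*pipe_group != 0);
		p->used[*pipe_group - 1] = 1;
	} else {
		if (*pipe_group == 0)
			return; /* not in use */
		p->used[*pipe_group - 1] = 0;
		*pipe_group = 0;
	}
}

int main(void)
{
	struct gsl_pool pool = { {0} };
	int pipe_group = 0;

	set_gsl_as_lock(&pool, &pipe_group, 1); /* immediate flip: lock */
	printf("assigned group %d\n", pipe_group);
	set_gsl_as_lock(&pool, &pipe_group, 0); /* vsync flip: release */
	printf("released, group now %d\n", pipe_group);
	return 0;
}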
-396,7 +474,7 @@ static void dcn20_dpp_pg_control( } -static void dcn20_hubp_pg_control( +void dcn20_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) @@ -473,8 +551,9 @@ static void dcn20_hubp_pg_control( /* disable HW used by plane. * note: cannot disable until disconnect is complete */ -static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; @@ -495,7 +574,7 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->power_gated = true; dc->optimized_required = false; /* We're powering off, no need to optimize */ - dc->hwss.plane_atomic_power_down(dc, + hws->funcs.plane_atomic_power_down(dc, pipe_ctx->plane_res.dpp, pipe_ctx->plane_res.hubp); @@ -526,6 +605,7 @@ enum dc_status dcn20_enable_stream_timing( struct dc_state *context, struct dc *dc) { + struct dce_hwseq *hws = dc->hwseq; struct dc_stream_state *stream = pipe_ctx->stream; struct drr_params params = {0}; unsigned int event_triggers = 0; @@ -585,7 +665,7 @@ enum dc_status dcn20_enable_stream_timing( pipe_ctx->stream_res.opp, true); - dc->hwss.blank_pixel_data(dc, pipe_ctx, true); + hws->funcs.blank_pixel_data(dc, pipe_ctx, true); /* VTG is within DCHUB command block. DCFCLK is always on */ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { @@ -593,7 +673,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp); + hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -649,7 +729,7 @@ void dcn20_program_output_csc(struct dc *dc, } } -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) { int mpcc_id = pipe_ctx->plane_res.hubp->inst; @@ -688,7 +768,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static bool dcn20_set_blend_lut( +bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -710,7 +790,7 @@ static bool dcn20_set_blend_lut( return result; } -static bool dcn20_set_shaper_3dlut( +bool dcn20_set_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -736,20 +816,14 @@ static bool dcn20_set_shaper_3dlut( else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); - if (plane_state->lut3d_func && - plane_state->lut3d_func->state.bits.initialized == 1 && - plane_state->lut3d_func->hdr_multiplier != 0) - dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, - plane_state->lut3d_func->hdr_multiplier); - else - dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000); - return result; } -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state) +bool dcn20_set_input_transfer_func(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state) { + struct dce_hwseq *hws = dc->hwseq; struct dpp *dpp_base = pipe_ctx->plane_res.dpp; const struct dc_transfer_func *tf = NULL; bool result = true; @@ -758,8 +832,8 @@ 
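The pg_control helpers above all share one sequence: map the block instance to its power domain, request the new power state, and wait for the status field to confirm it. A sketch of that shape, with the register write and wait reduced to a stub and the instance-to-domain mapping treated as illustrative:

#include <stdio.h>

static unsigned int pg_status; /* stands in for a DOMAIN*_PG_STATUS field */

static void domain_request_power(int domain, int power_on)
{
	/* the driver does REG_UPDATE on *_PG_CONFIG then REG_WAIT on *_PG_STATUS */
	pg_status = power_on ? 0 : 2;
	printf("domain %d -> %s (status %u)\n",
	       domain, power_on ? "on" : "gated", pg_status);
}

static void dsc_pg_control(unsigned int dsc_inst, int power_on)
{
	int domain;

	switch (dsc_inst) { /* one power domain per DSC instance */
	case 0: domain = 16; break;
	case 1: domain = 17; break;
	case 2: domain = 18; break;
	default:
		fprintf(stderr, "bad DSC instance %u\n", dsc_inst);
		return;
	}
	domain_request_power(domain, power_on);
}

int main(void)
{
	dsc_pg_control(1, 1); /* power DSC1 up before it gets programmed */
	return 0;
}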
bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, if (dpp_base == NULL || plane_state == NULL) return false; - dcn20_set_shaper_3dlut(pipe_ctx, plane_state); - dcn20_set_blend_lut(pipe_ctx, plane_state); + hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); + hws->funcs.set_blend_lut(pipe_ctx, plane_state); if (plane_state->in_transfer_func) tf = plane_state->in_transfer_func; @@ -804,6 +878,11 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, IPP_DEGAMMA_MODE_BYPASS); break; case TRANSFER_FUNCTION_PQ: + dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); + cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); + result = true; + break; default: result = false; break; @@ -824,7 +903,7 @@ bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } -static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; @@ -855,6 +934,7 @@ void dcn20_blank_pixel_data( struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space = stream->output_color_space; enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; + enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; struct pipe_ctx *odm_pipe; int odm_cnt = 1; @@ -873,8 +953,10 @@ void dcn20_blank_pixel_data( if (stream_res->abm) stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); - if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) + if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + } } else { test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; } @@ -882,6 +964,7 @@ void dcn20_blank_pixel_data( stream_res->opp->funcs->opp_set_disp_pattern_generator( stream_res->opp, test_pattern, + test_pattern_color_space, stream->timing.display_color_depth, &black_color, width, @@ -892,6 +975,7 @@ void dcn20_blank_pixel_data( odm_pipe->stream_res.opp, dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, + test_pattern_color_space, stream->timing.display_color_depth, &black_color, width, @@ -999,72 +1083,6 @@ void dcn20_enable_plane( } -static void dcn20_program_pipe( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - pipe_ctx->plane_state->update_flags.bits.full_update = - context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update; - - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dcn20_enable_plane(dc, pipe_ctx, context); - - update_dchubp_dpp(dc, pipe_ctx, context); - - set_hdr_multiplier(pipe_ctx); - - if (pipe_ctx->plane_state->update_flags.bits.full_update || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); - - /* dcn10_translate_regamma_to_hw_format takes 750us to finish - * only do gamma programming for full update. 
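The set_input_transfer_func hunk above gives PQ a real path: translate the curve into a piecewise-linear LUT and program the degamma block in USER_PWL mode, while sRGB and xvYCC keep their fixed-function hardware curves. A toy version of that mode selection (the enum values and string keys are invented for the example):

#include <stdio.h>
#include <string.h>

enum degamma_mode {
	DEGAMMA_BYPASS,
	DEGAMMA_HW_SRGB,  /* fixed-function curve in hardware */
	DEGAMMA_HW_XVYCC,
	DEGAMMA_USER_PWL, /* caller-supplied piecewise-linear LUT */
};

static enum degamma_mode pick_degamma(const char *tf)
{
	if (strcmp(tf, "srgb") == 0)
		return DEGAMMA_HW_SRGB;
	if (strcmp(tf, "xvycc") == 0)
		return DEGAMMA_HW_XVYCC;
	if (strcmp(tf, "pq") == 0)
		return DEGAMMA_USER_PWL; /* translate the curve, then program the PWL */
	return DEGAMMA_BYPASS;
}

int main(void)
{
	printf("pq -> degamma mode %d\n", pick_degamma("pq"));
	return 0;
}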
- * TODO: This can be further optimized/cleaned up - * Always call this for now since it does memcmp inside before - * doing heavy calculation and programming - */ - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); -} - -static void dcn20_program_all_pipe_in_tree( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) { - bool blank = !is_pipe_tree_visible(pipe_ctx); - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); - - pipe_ctx->stream_res.tg->funcs->set_vtg_params( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); - - if (dc->hwss.update_odm) - dc->hwss.update_odm(dc, context, pipe_ctx); - } - - if (pipe_ctx->plane_state != NULL) - dcn20_program_pipe(dc, pipe_ctx, context); - - if (pipe_ctx->bottom_pipe != NULL) { - ASSERT(pipe_ctx->bottom_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); - } else if (pipe_ctx->next_odm_pipe != NULL) { - ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context); - } -} - void dcn20_pipe_control_lock_global( struct dc *dc, struct pipe_ctx *pipe, @@ -1103,6 +1121,25 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + const int TIMEOUT_FOR_FLIP_PENDING = 100000; + int i; + + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) + break; + udelay(1); + } + + if (pipe->bottom_pipe != NULL) { + for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { + if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) + break; + udelay(1); + } + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. 
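The lock path above bounds its wait: poll the flip-pending flag for up to 100000 one-microsecond steps on the pipe (and again on its bottom pipe) before taking the lock, so a stuck flip cannot stall the sequence forever. A self-contained model of that poll-with-timeout idiom, with usleep standing in for udelay and a fake pending flag:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int pending_polls = 3; /* pretend the flip drains after three polls */

static bool flip_pending(void)
{
	return pending_polls-- > 0;
}

static bool wait_flip_idle(void)
{
	const int TIMEOUT_FOR_FLIP_PENDING = 100000;

	for (int i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
		if (!flip_pending())
			return true; /* safe to take the lock */
		usleep(1); /* udelay(1) in the driver */
	}
	return false; /* timed out; the driver proceeds to lock anyway */
}

int main(void)
{
	printf("flip idle before lock: %d\n", wait_flip_idle());
	return 0;
}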
*/ @@ -1124,114 +1161,460 @@ void dcn20_pipe_control_lock( } } -static void dcn20_apply_ctx_for_surface( - struct dc *dc, - const struct dc_stream_state *stream, - int num_planes, - struct dc_state *context) +static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) { - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; - int i; - struct timing_generator *tg; - bool removed_pipe[6] = { false }; - bool interdependent_update = false; - struct pipe_ctx *top_pipe_to_program = - find_top_pipe_for_stream(dc, context, stream); - struct pipe_ctx *prev_top_pipe_to_program = - find_top_pipe_for_stream(dc, dc->current_state, stream); - DC_LOGGER_INIT(dc->ctx->logger); + new_pipe->update_flags.raw = 0; - if (!top_pipe_to_program) + /* Exit on unchanged, unused pipe */ + if (!old_pipe->plane_state && !new_pipe->plane_state) return; + /* Detect pipe enable/disable */ + if (!old_pipe->plane_state && new_pipe->plane_state) { + new_pipe->update_flags.bits.enable = 1; + new_pipe->update_flags.bits.mpcc = 1; + new_pipe->update_flags.bits.dppclk = 1; + new_pipe->update_flags.bits.hubp_interdependent = 1; + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + new_pipe->update_flags.bits.gamut_remap = 1; + new_pipe->update_flags.bits.scaler = 1; + new_pipe->update_flags.bits.viewport = 1; + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + new_pipe->update_flags.bits.odm = 1; + new_pipe->update_flags.bits.global_sync = 1; + } + return; + } + if (old_pipe->plane_state && !new_pipe->plane_state) { + new_pipe->update_flags.bits.disable = 1; + return; + } - /* Carry over GSL groups in case the context is changing. */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == stream && - pipe_ctx->stream == old_pipe_ctx->stream) - pipe_ctx->stream_res.gsl_group = - old_pipe_ctx->stream_res.gsl_group; + /* Detect top pipe only changes */ + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + /* Detect odm changes */ + if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx) + || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe) + || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe) + || old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.odm = 1; + + /* Detect global sync changes */ + if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset + || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start + || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset + || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width) + new_pipe->update_flags.bits.global_sync = 1; } - tg = top_pipe_to_program->stream_res.tg; + /* + * Detect opp / tg change, only set on change, not on enable + * Assume mpcc inst = pipe index, if not this code needs to be updated + * since mpcc is what is affected by these. In fact all of our sequence + * makes this assumption at the moment with how hubp reset is matched to + * same index mpcc reset. 
+ */ + if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.opp_changed = 1; + if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) + new_pipe->update_flags.bits.tg_changed = 1; + + /* Detect mpcc blending changes, only dpp inst and bot matter here */ + if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp + || old_pipe->stream_res.opp != new_pipe->stream_res.opp + || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && new_pipe->bottom_pipe + && old_pipe->bottom_pipe->plane_res.mpcc_inst + != new_pipe->bottom_pipe->plane_res.mpcc_inst)) + new_pipe->update_flags.bits.mpcc = 1; + + /* Detect dppclk change */ + if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) + new_pipe->update_flags.bits.dppclk = 1; + + /* Check for scl update */ + if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) + new_pipe->update_flags.bits.scaler = 1; + /* Check for vp update */ + if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) + || memcmp(&old_pipe->plane_res.scl_data.viewport_c, + &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) + new_pipe->update_flags.bits.viewport = 1; + + /* Detect dlg/ttu/rq updates */ + { + struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs; + struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs; + + /* Detect pipe interdependent updates */ + if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch || + old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch || + old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c || + old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank || + old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank || + old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip || + old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip || + old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l || + old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c || + old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l || + old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l || + old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l || + old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 || + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 || + old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank || + old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) { + old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch; + old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch; + old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c; + old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank; + old_dlg_attr.dst_y_per_row_vblank = 
new_dlg_attr->dst_y_per_row_vblank; + old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip; + old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip; + old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l; + old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c; + old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l; + old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l; + old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c; + old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l; + old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c; + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0; + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1; + old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank; + old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip; + new_pipe->update_flags.bits.hubp_interdependent = 1; + } + /* Detect any other updates to ttu/rq/dlg */ + if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) || + memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) || + memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + } +} - interdependent_update = top_pipe_to_program->plane_state && - top_pipe_to_program->plane_state->update_flags.bits.full_update; +static void dcn20_update_dchubp_dpp( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; - if (interdependent_update) - lock_all_pipes(dc, context, true); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, true); + if (pipe_ctx->update_flags.bits.dppclk) + dpp->funcs->dpp_dppclk_control(dpp, false, true); - if (num_planes == 0) { - /* OTG blank before remove all front end */ - dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); + /* TODO: Need input parameter to tell current DCHUB pipe ties to which OTG + * VTG is within DCHUBBUB which is a common block shared by each pipe HUBP. + * VTG is 1:1 mapping with OTG. 
Each pipe HUBP will select which VTG + */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); + + hubp->funcs->hubp_setup( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, + &pipe_ctx->rq_regs, + &pipe_ctx->pipe_dlg_param); + } + if (pipe_ctx->update_flags.bits.hubp_interdependent) + hubp->funcs->hubp_setup_interdependent( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs); + + if (pipe_ctx->update_flags.bits.enable || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + struct dc_bias_and_scale bns_params = {0}; + + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + + if (dpp->funcs->dpp_program_bias_and_scale) { + //TODO :for CNVC set scale and bias registers if necessary + build_prescale_params(&bns_params, plane_state); + dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); + } } - /* Disconnect unused mpcc */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - /* - * Powergate reused pipes that are not powergated - * fairly hacky right now, using opp_id as indicator - * TODO: After move dc_post to dc_update, this will - * be removed. - */ - if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { - if (old_pipe_ctx->stream_res.tg == tg && - old_pipe_ctx->plane_res.hubp && - old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID) - dc->hwss.disable_plane(dc, old_pipe_ctx); + if (pipe_ctx->update_flags.bits.mpcc + || plane_state->update_flags.bits.global_alpha_change + || plane_state->update_flags.bits.per_pixel_alpha_change) { + // MPCC inst is equal to pipe index in practice + int mpcc_inst = pipe_ctx->pipe_idx; + int opp_inst; + int opp_count = dc->res_pool->res_cap->num_opp; + + for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { + if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { + dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); + dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; + break; + } } + hws->funcs.update_mpcc(dc, pipe_ctx); + } + + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); + /* scaler configuration */ + pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } - if ((!pipe_ctx->plane_state || - pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) + hubp->funcs->mem_program_viewport( + hubp, + &pipe_ctx->plane_res.scl_data.viewport, + &pipe_ctx->plane_res.scl_data.viewport_c, + 
plane_state->rotation); + + /* Any updates are handled in dc interface, just need to apply existing for plane enable */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) + && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + } - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); - removed_pipe[i] = true; + /* Any updates are handled in dc interface, just need + * to apply existing for plane enable / opp change */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->stream->update_flags.bits.gamut_remap + || pipe_ctx->stream->update_flags.bits.out_csc) { + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); + + /*call the dcn2 method which uses mpc csc*/ + dc->hwss.program_output_csc(dc, + pipe_ctx, + pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, + hubp->opp_id); + } - DC_LOG_DC("Reset mpcc for pipe %d\n", - old_pipe_ctx->pipe_idx); - } + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hubp->funcs->hubp_program_surface_config( + hubp, + plane_state->format, + &plane_state->tiling_info, + &size, + plane_state->rotation, + &plane_state->dcc, + plane_state->horizontal_mirror, + 0); + hubp->power_gated = false; + } + + if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) + hws->funcs.update_plane_addr(dc, pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable) + hubp->funcs->set_blank(hubp, false); +} + + +static void dcn20_program_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + /* Only need to unblank on top pipe */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) + && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) + hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + + if (pipe_ctx->update_flags.bits.global_sync) { + pipe_ctx->stream_res.tg->funcs->program_global_sync( + pipe_ctx->stream_res.tg, + pipe_ctx->pipe_dlg_param.vready_offset, + pipe_ctx->pipe_dlg_param.vstartup_start, + pipe_ctx->pipe_dlg_param.vupdate_offset, + pipe_ctx->pipe_dlg_param.vupdate_width); + + pipe_ctx->stream_res.tg->funcs->set_vtg_params( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); + } + + if (pipe_ctx->update_flags.bits.odm) + hws->funcs.update_odm(dc, context, pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable) + dcn20_enable_plane(dc, pipe_ctx, context); + + if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) + dcn20_update_dchubp_dpp(dc, pipe_ctx, context); + + if 
(pipe_ctx->update_flags.bits.enable + || pipe_ctx->plane_state->update_flags.bits.hdr_mult) + hws->funcs.set_hdr_multiplier(pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change) + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); + + /* dcn10_translate_regamma_to_hw_format takes 750us to finish, so + * only do gamma programming when powering on; an internal memcmp + * avoids updating on slave planes + */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); + + /* If the pipe has been enabled or has a different opp, we + * should reprogram the fmt. This deals with cases where + * interaction between mpc and odm combine on different streams + * causes a different pipe to be chosen to odm combine with. + */ + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->update_flags.bits.opp_changed) { + + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->signal); + + pipe_ctx->stream_res.opp->funcs->opp_program_fmt( + pipe_ctx->stream_res.opp, + &pipe_ctx->stream->bit_depth_params, + &pipe_ctx->stream->clamping); } +} - if (num_planes > 0) - dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context); +static bool does_pipe_need_lock(struct pipe_ctx *pipe) +{ + if ((pipe->plane_state && pipe->plane_state->update_flags.raw) + || pipe->update_flags.raw) + return true; + if (pipe->bottom_pipe) + return does_pipe_need_lock(pipe->bottom_pipe); - /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); + return false; +} - if (interdependent_update) - for (i = 0; i < dc->res_pool->pipe_count; i++) { +void dcn20_program_front_end_for_ctx( + struct dc *dc, + struct dc_state *context) +{ + const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; + int i; + struct dce_hwseq *hws = dc->hwseq; + bool pipe_locked[MAX_PIPES] = {false}; + DC_LOGGER_INIT(dc->ctx->logger); + + /* Carry over GSL groups in case the context is changing.
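does_pipe_need_lock() above recurses down the bottom_pipe chain so that a whole MPC tree is locked if any pipe in it carries a pending plane or pipe update. An equivalent iterative sketch, again with simplified stand-in types rather than the driver's real structs:

#include <stdbool.h>
#include <stddef.h>

struct pipe {
	struct pipe *bottom_pipe;	/* next pipe down the blending tree */
	unsigned int plane_update_raw;	/* plane_state->update_flags.raw */
	unsigned int pipe_update_raw;	/* the pipe's own update_flags.raw */
};

/* True when this pipe or any pipe below it has a pending update;
 * the caller then locks the whole tree before programming it. */
static bool tree_needs_lock(const struct pipe *pipe)
{
	for (; pipe != NULL; pipe = pipe->bottom_pipe)
		if (pipe->plane_update_raw || pipe->pipe_update_raw)
			return true;
	return false;
}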
*/ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream) + context->res_ctx.pipe_ctx[i].stream_res.gsl_group = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group; + + /* Set pipe update flags and lock pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + &context->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (!context->res_ctx.pipe_ctx[i].top_pipe && + does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream || - !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg)) - continue; + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, true); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true); + pipe_locked[i] = true; + } - pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( - pipe_ctx->plane_res.hubp, - &pipe_ctx->dlg_regs, - &pipe_ctx->ttu_regs); + /* OTG blank before disabling all front ends */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + && !context->res_ctx.pipe_ctx[i].top_pipe + && !context->res_ctx.pipe_ctx[i].prev_odm_pipe + && context->res_ctx.pipe_ctx[i].stream) + hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); + + /* Disconnect mpcc */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { + hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } - if (interdependent_update) - lock_all_pipes(dc, context, false); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, false); + /* + * Program all updated pipes, order matters for mpcc setup. 
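The loop that follows realizes that ordering requirement: it starts only at tree tops (pipes with a plane and no top_pipe) and programs each pipe while walking the bottom_pipe links, so every MPCC is configured after its parent in the blending tree. A compact sketch of the traversal, with illustrative stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct pipe_node {
	struct pipe_node *top_pipe;	/* non-NULL for subordinate pipes */
	struct pipe_node *bottom_pipe;	/* next pipe in the MPC tree */
	bool has_plane;
};

static void program_one_pipe(struct pipe_node *pipe)
{
	(void)pipe;	/* hardware programming elided in this sketch */
}

/* Program each MPC tree parent-first, mirroring the loop below. */
static void program_all_trees(struct pipe_node *pipes[], int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct pipe_node *pipe = pipes[i];

		if (pipe == NULL || !pipe->has_plane || pipe->top_pipe)
			continue;	/* only start from a tree's top pipe */
		for (; pipe != NULL; pipe = pipe->bottom_pipe)
			program_one_pipe(pipe);
	}
}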
Start with + * top pipe and program all pipes that follow in order + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe) { + while (pipe) { + dcn20_program_pipe(dc, pipe, context); + pipe = pipe->bottom_pipe; + } + /* Program secondary blending tree and writeback pipes */ + pipe = &context->res_ctx.pipe_ctx[i]; + if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 + && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) + && hws->funcs.program_all_writeback_pipes_in_tree) + hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + } + } + + /* Unlock all locked pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (pipe_locked[i]) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, false); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false); + } for (i = 0; i < dc->res_pool->pipe_count; i++) - if (removed_pipe[i]) - dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) + dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -1239,15 +1622,22 @@ static void dcn20_apply_ctx_for_surface( * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which * is unsupported on DCN. */ - i = 0; - if (num_planes > 0 && top_pipe_to_program && - (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) { - while (i < TIMEOUT_FOR_PIPE_ENABLE_MS && - top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) { - i += 1; - msleep(1); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { + struct hubp *hubp = pipe->plane_res.hubp; + int j = 0; + + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + msleep(1); } } + + /* WA to apply WM setting*/ + if (dc->hwseq->wa.DEGVIDCN21) + dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); } @@ -1292,6 +1682,7 @@ bool dcn20_update_bandwidth( struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) @@ -1319,8 +1710,12 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + if (pipe_ctx->prev_odm_pipe == NULL) - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); + + if (hws->funcs.setup_vupdate_interrupt) + hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1334,10 +1729,11 @@ bool dcn20_update_bandwidth( return true; } -static void dcn20_enable_writeback( +void dcn20_enable_writeback( struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info) + struct dc_writeback_info *wb_info, + struct dc_state *context) { struct dwbc *dwb; struct mcif_wb *mcif_wb; @@ -1354,7 +1750,7 @@ static void 
dcn20_enable_writeback( optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); - mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); /* Enable MCIF_WB */ mcif_wb->funcs->enable_mcif(mcif_wb); /* Enable DWB */ @@ -1377,7 +1773,7 @@ void dcn20_disable_writeback( mcif_wb->funcs->disable_mcif(mcif_wb); } -bool dcn20_hwss_wait_for_blank_complete( +bool dcn20_wait_for_blank_complete( struct output_pixel_processor *opp) { int counter; @@ -1406,9 +1802,8 @@ bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) return hubp->funcs->dmdata_status_done(hubp); } -static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dce_hwseq *hws = dc->hwseq; if (pipe_ctx->stream_res.dsc) { @@ -1420,12 +1815,10 @@ static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx odm_pipe = odm_pipe->next_odm_pipe; } } -#endif } -static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dce_hwseq *hws = dc->hwseq; if (pipe_ctx->stream_res.dsc) { @@ -1437,7 +1830,6 @@ static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) odm_pipe = odm_pipe->next_odm_pipe; } } -#endif } void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) @@ -1460,12 +1852,7 @@ void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) hubp->funcs->dmdata_set_attributes(hubp, &attr); } -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx) -{ - dce110_disable_stream(pipe_ctx); -} - -static void dcn20_init_vm_ctx( +void dcn20_init_vm_ctx( struct dce_hwseq *hws, struct dc *dc, struct dc_virtual_addr_space_config *va_config, @@ -1487,7 +1874,7 @@ static void dcn20_init_vm_ctx( dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); } -static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) { struct dcn_hubbub_phys_addr_config config; @@ -1531,8 +1918,7 @@ static bool patch_address_for_sbs_tb_stereo( return false; } - -static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { bool addr_patched = false; PHYSICAL_ADDRESS_LOC addr; @@ -1566,6 +1952,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; struct pipe_ctx *odm_pipe; params.opp_cnt = 1; @@ -1578,7 +1965,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; if (dc_is_dp_signal(pipe_ctx->stream->signal)) { - if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) + if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) 
params.timing.pix_clk_100hz /= 2; pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); @@ -1586,14 +1973,14 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - link->dc->hwss.edp_backlight_control(link, true); + hws->funcs.edp_backlight_control(link, true); } } -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; - int start_line = get_vupdate_offset_from_vsync(pipe_ctx); + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); if (start_line < 0) start_line = 0; @@ -1636,11 +2023,9 @@ static void dcn20_reset_back_end_for_pipe( } } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else if (pipe_ctx->stream_res.dsc) { dp_set_dsc_enable(pipe_ctx, false); } -#endif /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable @@ -1671,11 +2056,12 @@ static void dcn20_reset_back_end_for_pipe( pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } -static void dcn20_reset_hw_ctx_wrap( +void dcn20_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context) { int i; + struct dce_hwseq *hws = dc->hwseq; /* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { @@ -1694,16 +2080,39 @@ static void dcn20_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); - if (dc->hwss.enable_stream_gating) - dc->hwss.enable_stream_gating(dc, pipe_ctx); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } } } -static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + const struct tg_color pipe_colors[6] = { + {MAX_TG_COLOR_VALUE, 0, 0}, // red + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow + {0, MAX_TG_COLOR_VALUE, 0}, // blue + {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple + {0, 0, MAX_TG_COLOR_VALUE}, // green + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange + }; + + struct pipe_ctx *top_pipe = pipe_ctx; + + while (top_pipe->top_pipe) { + top_pipe = top_pipe->top_pipe; + } + + *color = pipe_colors[top_pipe->pipe_idx]; +} + +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { + struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = { {0} }; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; @@ -1714,10 +2123,13 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) // input to MPCC is always RGB, by default leave black_color at 0 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - dcn10_get_hdr_visual_confirm_color( + hws->funcs.get_hdr_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - dcn10_get_surface_visual_confirm_color( + hws->funcs.get_surface_visual_confirm_color( + pipe_ctx, &blnd_cfg.black_color); + } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { + dcn20_get_mpctree_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); } @@ -1751,12 +2163,6 @@ static void dcn20_update_mpcc(struct dc *dc, struct 
pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; - /* If there is no full update, don't need to touch MPC tree*/ - if (!pipe_ctx->plane_state->update_flags.bits.full_update) { - mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); - return; - } - /* check if this MPCC is already being used */ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); /* remove MPCC if being used */ @@ -1781,125 +2187,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->mpcc_id = mpcc_id; } -static int find_free_gsl_group(const struct dc *dc) -{ - if (dc->res_pool->gsl_groups.gsl_0 == 0) - return 1; - if (dc->res_pool->gsl_groups.gsl_1 == 0) - return 2; - if (dc->res_pool->gsl_groups.gsl_2 == 0) - return 3; - - return 0; -} - -/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) - * This is only used to lock pipes in pipe splitting case with immediate flip - * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, - * so we get tearing with freesync since we cannot flip multiple pipes - * atomically. - * We use GSL for this: - * - immediate flip: find first available GSL group if not already assigned - * program gsl with that group, set current OTG as master - * and always us 0x4 = AND of flip_ready from all pipes - * - vsync flip: disable GSL if used - * - * Groups in stream_res are stored as +1 from HW registers, i.e. - * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 - * Using a magic value like -1 would require tracking all inits/resets - */ -void dcn20_setup_gsl_group_as_lock( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enable) -{ - struct gsl_params gsl; - int group_idx; - - memset(&gsl, 0, sizeof(struct gsl_params)); - - if (enable) { - /* return if group already assigned since GSL was set up - * for vsync flip, we would unassign so it can't be "left over" - */ - if (pipe_ctx->stream_res.gsl_group > 0) - return; - - group_idx = find_free_gsl_group(dc); - ASSERT(group_idx != 0); - pipe_ctx->stream_res.gsl_group = group_idx; - - /* set gsl group reg field and mark resource used */ - switch (group_idx) { - case 1: - gsl.gsl0_en = 1; - dc->res_pool->gsl_groups.gsl_0 = 1; - break; - case 2: - gsl.gsl1_en = 1; - dc->res_pool->gsl_groups.gsl_1 = 1; - break; - case 3: - gsl.gsl2_en = 1; - dc->res_pool->gsl_groups.gsl_2 = 1; - break; - default: - BREAK_TO_DEBUGGER(); - return; // invalid case - } - gsl.gsl_master_en = 1; - } else { - group_idx = pipe_ctx->stream_res.gsl_group; - if (group_idx == 0) - return; // if not in use, just return - - pipe_ctx->stream_res.gsl_group = 0; - - /* unset gsl group reg field and mark resource free */ - switch (group_idx) { - case 1: - gsl.gsl0_en = 0; - dc->res_pool->gsl_groups.gsl_0 = 0; - break; - case 2: - gsl.gsl1_en = 0; - dc->res_pool->gsl_groups.gsl_1 = 0; - break; - case 3: - gsl.gsl2_en = 0; - dc->res_pool->gsl_groups.gsl_2 = 0; - break; - default: - BREAK_TO_DEBUGGER(); - return; - } - gsl.gsl_master_en = 0; - } - - /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { - pipe_ctx->stream_res.tg->funcs->set_gsl( - pipe_ctx->stream_res.tg, - &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); - } else - BREAK_TO_DEBUGGER(); -} - -static void dcn20_set_flip_control_gsl( - struct pipe_ctx *pipe_ctx, - bool flip_immediate) -{ - if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) - pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( - pipe_ctx->plane_res.hubp, flip_immediate); - -} - -static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = pipe_ctx->stream->link->cur_link_settings.lane_count; @@ -1919,8 +2207,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - if (link->dc->hwss.program_dmdata_engine) - link->dc->hwss.program_dmdata_engine(pipe_ctx); + if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { + if (link->dc->hwss.program_dmdata_engine) + link->dc->hwss.program_dmdata_engine(pipe_ctx); + } link->dc->hwss.update_info_frame(pipe_ctx); @@ -1945,7 +2235,7 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) } } -static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1971,7 +2261,7 @@ static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) hubp->inst, mode); } -static void dcn20_fpga_init_hw(struct dc *dc) +void dcn20_fpga_init_hw(struct dc *dc) { int i, j; struct dce_hwseq *hws = dc->hwseq; @@ -1986,13 +2276,13 @@ static void dcn20_fpga_init_hw(struct dc *dc) res_pool->dccg->funcs->dccg_init(res_pool->dccg); //Enable ability to power gate / don't force power on permanently - dc->hwss.enable_power_gating_plane(hws, true); + hws->funcs.enable_power_gating_plane(hws, true); // Specific to FPGA dccg and registers REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); - dcn20_dccg_init(hws); + hws->funcs.dccg_init(hws); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); @@ -2056,7 +2346,7 @@ static void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - hwss1_plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -2085,56 +2375,3 @@ static void dcn20_fpga_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } } - -void dcn20_hw_sequencer_construct(struct dc *dc) -{ - dcn10_hw_sequencer_construct(dc); - dc->hwss.unblank_stream = dcn20_unblank_stream; - dc->hwss.update_plane_addr = dcn20_update_plane_addr; - dc->hwss.enable_stream_timing = dcn20_enable_stream_timing; - dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; - dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; - dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; - dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface; - dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; - dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; - dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; - dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth; - dc->hwss.update_bandwidth = dcn20_update_bandwidth; - dc->hwss.enable_writeback = dcn20_enable_writeback; - 
dc->hwss.disable_writeback = dcn20_disable_writeback; - dc->hwss.program_output_csc = dcn20_program_output_csc; - dc->hwss.update_odm = dcn20_update_odm; - dc->hwss.blank_pixel_data = dcn20_blank_pixel_data; - dc->hwss.dmdata_status_done = dcn20_dmdata_status_done; - dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine; - dc->hwss.enable_stream = dcn20_enable_stream; - dc->hwss.disable_stream = dcn20_disable_stream; - dc->hwss.init_sys_ctx = dcn20_init_sys_ctx; - dc->hwss.init_vm_ctx = dcn20_init_vm_ctx; - dc->hwss.disable_stream_gating = dcn20_disable_stream_gating; - dc->hwss.enable_stream_gating = dcn20_enable_stream_gating; - dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt; - dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap; - dc->hwss.update_mpcc = dcn20_update_mpcc; - dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl; - dc->hwss.init_blank = dcn20_init_blank; - dc->hwss.disable_plane = dcn20_disable_plane; - dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable; - dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane; - dc->hwss.dpp_pg_control = dcn20_dpp_pg_control; - dc->hwss.hubp_pg_control = dcn20_hubp_pg_control; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - dc->hwss.dsc_pg_control = dcn20_dsc_pg_control; -#else - dc->hwss.dsc_pg_control = NULL; -#endif - dc->hwss.disable_vga = dcn20_disable_vga; - - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - dc->hwss.init_hw = dcn20_fpga_init_hw; - dc->hwss.init_pipes = NULL; - } - - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 92ab3dd91814..eecd7a26ec4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -26,74 +26,113 @@ #ifndef __DC_HWSS_DCN20_H__ #define __DC_HWSS_DCN20_H__ -struct dc; +#include "hw_sequencer_private.h" -void dcn20_hw_sequencer_construct(struct dc *dc); - -enum dc_status dcn20_enable_stream_timing( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, - struct dc *dc); - -void dcn20_blank_pixel_data( +bool dcn20_set_blend_lut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_program_front_end_for_ctx( struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool blank); - + struct dc_state *context); +void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); +bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); void dcn20_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, int opp_id); - +void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); +void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_blank_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); +void dcn20_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn20_pipe_control_lock_global( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); void dcn20_prepare_bandwidth( struct dc *dc, struct dc_state *context); - void 
dcn20_optimize_bandwidth( struct dc *dc, struct dc_state *context); - bool dcn20_update_bandwidth( struct dc *dc, struct dc_state *context); - +void dcn20_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); +enum dc_status dcn20_enable_stream_timing( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); +void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_init_blank( + struct dc *dc, + struct timing_generator *tg); +void dcn20_disable_vga( + struct dce_hwseq *hws); +void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_power_gating_plane( + struct dce_hwseq *hws, + bool enable); +void dcn20_dpp_pg_control( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); +void dcn20_hubp_pg_control( + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); +void dcn20_program_triple_buffer( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable_triple_buffer); +void dcn20_enable_writeback( + struct dc *dc, + const struct dc_stream_status *stream_status, + struct dc_writeback_info *wb_info, + struct dc_state *context); void dcn20_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst); - -bool dcn20_hwss_wait_for_blank_complete( - struct output_pixel_processor *opp); - -bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream); - -bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state); - +void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx); - +void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx); void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx); - -void dcn20_disable_stream(struct pipe_ctx *pipe_ctx); - -void dcn20_program_tripleBuffer( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer); - -void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx); - -void dcn20_pipe_control_lock_global( +void dcn20_init_vm_ctx( + struct dce_hwseq *hws, struct dc *dc, - struct pipe_ctx *pipe, - bool lock); -void dcn20_setup_gsl_group_as_lock(const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enable); + struct dc_virtual_addr_space_config *va_config, + int vmid); +void dcn20_set_flip_control_gsl( + struct pipe_ctx *pipe_ctx, + bool flip_immediate); +void dcn20_dsc_pg_control( + struct dce_hwseq *hws, + unsigned int dsc_inst, + bool power_on); +void dcn20_fpga_init_hw(struct dc *dc); +bool dcn20_wait_for_blank_complete( + struct output_pixel_processor *opp); void dcn20_dccg_init(struct dce_hwseq *hws); -void dcn20_init_blank( - struct dc *dc, - struct timing_generator *tg); -void dcn20_display_init(struct dc *dc); +int dcn20_init_sys_ctx(struct dce_hwseq *hws, + struct dc *dc, + struct dc_phy_addr_space_config *pa_config); + #endif /* __DC_HWSS_DCN20_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c new file mode 100644 index 000000000000..d51e02fdab4d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -0,0 +1,133 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20_hwseq.h" + +static const struct hw_sequencer_funcs dcn20_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .pipe_control_lock = dcn20_pipe_control_lock, + .pipe_control_lock_global = dcn20_pipe_control_lock_global, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + 
.dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, +}; + +static const struct hwseq_private_funcs dcn20_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color, + .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn20_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn20_funcs; + dc->hwseq->funcs = dcn20_private_funcs; + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->hwss.init_hw = dcn20_fpga_init_hw; + dc->hwseq->funcs.init_pipes = NULL; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h new file mode 100644 index 000000000000..12277797cd71 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN20_INIT_H__ +#define __DC_DCN20_INIT_H__ + +struct dc; + +void dcn20_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index e476f27aa3a9..e4ac73035c84 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -168,10 +168,8 @@ static struct mpll_cfg dcn2_mpll_cfg[] = { void enc2_fec_set_enable(struct link_encoder *enc, bool enable) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DC_LOG_DSC("%s FEC at link encoder inst %d", enable ? "Enabling" : "Disabling", enc->id.enum_id); -#endif REG_UPDATE(DP_DPHY_CNTL, DPHY_FEC_EN, enable); } @@ -192,7 +190,6 @@ bool enc2_fec_is_active(struct link_encoder *enc) return (active != 0); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* this function reads dsc related register fields to be logged later in dcn10_log_hw_state * into a dcn_dsc_state struct. */ @@ -203,8 +200,8 @@ void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s) REG_GET(DP_DPHY_CNTL, DPHY_FEC_EN, &s->dphy_fec_en); REG_GET(DP_DPHY_CNTL, DPHY_FEC_READY_SHADOW, &s->dphy_fec_ready_shadow); REG_GET(DP_DPHY_CNTL, DPHY_FEC_ACTIVE_STATUS, &s->dphy_fec_active_status); + REG_GET(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, &s->dp_link_training_complete); } -#endif static bool update_cfg_data( struct dcn10_link_encoder *enc10, @@ -315,9 +312,7 @@ void enc2_hw_init(struct link_encoder *enc) } static const struct link_encoder_funcs dcn20_link_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .read_state = link_enc2_read_state, -#endif .validate_output_with_stream = dcn10_link_encoder_validate_output_with_stream, .hw_init = enc2_hw_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 3736b5548a25..62dfd34c69f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -91,6 +91,13 @@ struct mpll_cfg { uint32_t ref_range; uint32_t ref_clk; bool hdmimode_enable; + bool sup_pre_hp; + bool dp_tx0_vergdrv_byp; + bool dp_tx1_vergdrv_byp; + bool dp_tx2_vergdrv_byp; + bool dp_tx3_vergdrv_byp; + + }; struct dpcssys_phy_seq_cfg { @@ -151,9 +158,7 @@ void enc2_fec_set_ready(struct link_encoder *enc, bool ready); bool enc2_fec_is_active(struct link_encoder *enc); void enc2_hw_init(struct link_encoder *enc); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void link_enc2_read_state(struct link_encoder *enc, struct link_enc_state *s); -#endif void dcn20_link_encoder_enable_dp_output( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 5a188b2bc033..f90031ed58a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -345,6 +345,9 @@ static void mpc20_program_ogam_pwl( uint32_t i; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + PERF_TRACE(); + REG_SEQ_START(); + for (i = 0 ; i < num; i++) { REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg); @@ -463,6 +466,11 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } + + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 40164ed015ea..023cc71fad0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -41,6 +41,7 @@ void opp2_set_disp_pattern_generator( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, @@ -100,9 +101,22 @@ void opp2_set_disp_pattern_generator( TEST_PATTERN_DYN_RANGE_CEA : TEST_PATTERN_DYN_RANGE_VESA); + switch (color_space) { + case CONTROLLER_DP_COLOR_SPACE_YCBCR601: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR601; + break; + case CONTROLLER_DP_COLOR_SPACE_YCBCR709: + mode = TEST_PATTERN_MODE_COLORSQUARES_YCBCR709; + break; + case CONTROLLER_DP_COLOR_SPACE_RGB: + default: + mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; + break; + } + REG_UPDATE_6(DPG_CONTROL, DPG_EN, 1, - DPG_MODE, TEST_PATTERN_MODE_COLORSQUARES_RGB, + DPG_MODE, mode, DPG_DYNAMIC_RANGE, dyn_range, DPG_BIT_DEPTH, bit_depth, DPG_VRES, 6, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index abd8de9a78f8..4093bec172c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -140,6 +140,7 @@ void dcn20_opp_construct(struct dcn20_opp *oppn20, void opp2_set_disp_pattern_generator( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 2137e2be2140..673c83e2afd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc) REG_UPDATE(CONTROL, VTG0_ENABLE, 1); + REG_SEQ_START(); + /* Enable CRTC */ REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 3, OTG_MASTER_EN, 1); + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + return true; } @@ -167,7 +172,6 @@ void optc2_set_gsl_source_select( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */ void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc, int x_position, @@ -201,13 +205,12 @@ void optc2_set_dsc_config(struct timing_generator *optc, REG_UPDATE(OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } -#endif -/** - * PTI i think is already done somewhere else for 2ka - * (opp?, please double check. 
- * OPTC side only has 1 register to set for PTI_ENABLE) - */ +/*TEMP: Need to figure out inheritance model here.*/ +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return optc1_is_two_pixels_per_containter(timing); +} void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) @@ -221,7 +224,7 @@ void optc2_set_odm_bypass(struct timing_generator *optc, OPTC_SEG1_SRC_SEL, 0xf); REG_WRITE(OTG_H_TIMING_CNTL, 0); - h_div_2 = optc1_is_two_pixels_per_containter(dc_crtc_timing); + h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing); REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, h_div_2); REG_SET(OPTC_MEMORY_CONFIG, 0, @@ -287,6 +290,10 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 2; else *num_of_src_opp = 1; + + /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ + if (*src_opp_id_1 == 0xf) + *num_of_src_opp = 1; } void optc2_set_dwb_source(struct timing_generator *optc, @@ -444,9 +451,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, .configure_crc = optc1_configure_crc, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .set_dsc_config = optc2_set_dsc_config, -#endif .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = optc2_set_odm_bypass, .set_odm_combine = optc2_set_odm_combine, @@ -456,7 +461,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, - .is_matching_timing = optc1_is_matching_timing + .get_hw_timing = optc1_get_hw_timing, }; void dcn20_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index 32a58431fd09..ac93fbfaee03 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -86,12 +86,10 @@ void optc2_set_gsl_source_select(struct timing_generator *optc, int group_idx, uint32_t gsl_ready_signal); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void optc2_set_dsc_config(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); -#endif void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); @@ -109,5 +107,5 @@ void optc2_triplebuffer_unlock(struct timing_generator *optc); void optc2_lock_doublebuffer_disable(struct timing_generator *optc); void optc2_lock_doublebuffer_enable(struct timing_generator *optc); void optc2_program_manual_trigger(struct timing_generator *optc); - +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); #endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 5a2763daff4d..cfc69919ef9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -28,6 +28,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn20_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" @@ -45,9 +47,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn20_opp.h" -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #include "dcn20_dsc.h" -#endif #include "dcn20_link_encoder.h" #include "dcn20_stream_encoder.h" @@ -59,6 +59,7 @@ #include 
"dml/display_mode_vba.h" #include "dcn20_dccg.h" #include "dcn20_vmid.h" +#include "dc_link_ddc.h" #include "navi10_ip_offset.h" @@ -82,8 +83,6 @@ #include "amdgpu_socbb.h" -/* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */ -#define SOC_BOUNDING_BOX_VALID false #define DC_LOGGER_INIT(logger) struct _vcs_dpi_ip_params_st dcn2_0_ip = { @@ -94,11 +93,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 0, .pte_group_size_bytes = 2048, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, -#else - .num_dsc = 0, -#endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 84, @@ -157,6 +152,74 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .xfc_fill_constant_bytes = 0, }; +struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { + .odm_capable = 1, + .gpuvm_enable = 0, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .num_dsc = 5, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 84, + .dpte_buffer_size_in_pte_reqs_chroma = 42,//todo + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 4, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + .writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 5, + .max_num_dpp = 5, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 8, + .max_vscl_ratio = 8, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 32, // + .dppclk_delay_subtotal = 77, // + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, // + .dcfclk_cstate_latency = 10, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .xfc_supported = true, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .ptoi_supported = 0 +}; + struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { /* Defaults that get patched on driver load from firmware. 
*/ .clock_limits = { @@ -581,11 +644,13 @@ static const struct dcn2_dpp_registers tf_regs[] = { }; static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT) + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK) + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN10 }; #define dwbc_regs_dcn2(id)\ @@ -732,7 +797,42 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + default: + ASSERT(0); + return 0; + } +} + #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ @@ -754,7 +854,6 @@ static const struct dcn20_dsc_shift dsc_shift = { static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; -#endif static const struct dccg_registers dccg_regs = { DCCG_REG_LIST_DCN2() @@ -778,9 +877,7 @@ static const struct resource_caps res_cap_nv10 = { .num_dwb = 1, .num_ddc = 6, .num_vmid = 16, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 6, -#endif }; static const struct dc_plane_cap plane_cap = { @@ -814,8 +911,10 @@ static const struct resource_caps res_cap_nv14 = { .num_audio = 6, .num_stream_encoder = 5, .num_pll = 5, - .num_dwb = 0, + .num_dwb = 1, .num_ddc = 5, + .num_vmid = 16, + .num_dsc = 5, }; static const struct dc_debug_options debug_defaults_drv = { @@ -825,7 +924,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, @@ -922,7 +1021,10 @@ struct dce_aux *dcn20_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1042,14 +1144,18 @@ struct link_encoder *dcn20_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc20) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn20_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1107,6 +1213,11 @@ struct stream_encoder *dcn20_stream_encoder_create( if (!enc1) return NULL; + if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { + if (eng_id >= ENGINE_ID_DIGD) + eng_id++; + } + dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -1154,13 
+1265,14 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn20_hwseq_create, }; +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + void dcn20_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) @@ -1183,9 +1295,8 @@ void dcn20_dsc_destroy(struct display_stream_compressor **dsc) *dsc = NULL; } -#endif -static void destruct(struct dcn20_resource_pool *pool) +static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) { unsigned int i; @@ -1196,12 +1307,10 @@ static void destruct(struct dcn20_resource_pool *pool) } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } -#endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -1294,6 +1403,8 @@ static void destruct(struct dcn20_resource_pool *pool) if (pool->base.pp_smu != NULL) dcn20_pp_smu_destroy(&pool->base.pp_smu); + if (pool->base.oem_device != NULL) + dal_ddc_service_destroy(&pool->base.oem_device); } struct hubp *dcn20_hubp_create( @@ -1344,7 +1455,7 @@ static void get_pixel_clock_parameters( if (opp_cnt == 4) pixel_clk_params->requested_pix_clk_100hz /= 4; - else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) + else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) @@ -1410,17 +1521,23 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state return status; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void acquire_dsc(struct resource_context *res_ctx, const struct resource_pool *pool, - struct display_stream_compressor **dsc) + struct display_stream_compressor **dsc, + int pipe_idx) { int i; ASSERT(*dsc == NULL); *dsc = NULL; + if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { + *dsc = pool->dscs[pipe_idx]; + res_ctx->is_dsc_acquired[pipe_idx] = true; + return; + } + /* Find first free DSC */ for (i = 0; i < pool->res_cap->num_dsc; i++) if (!res_ctx->is_dsc_acquired[i]) { @@ -1444,10 +1561,8 @@ static void release_dsc(struct resource_context *res_ctx, } } -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static enum dc_status add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream) @@ -1463,11 +1578,10 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc, if (pipe_ctx->stream != dc_stream) continue; - acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc); + acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i); /* The number of DSCs can be less than the number of pipes */ if (!pipe_ctx->stream_res.dsc) { - dm_output_to_console("No DSCs available\n"); result = DC_NO_DSC_RESOURCE; } @@ -1499,7 +1613,6 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, else return DC_OK; } -#endif enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) @@ -1511,11 +1624,9 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, if (result == DC_OK) result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Get a DSC if required and available */ if 
(result == DC_OK && dc_stream->timing.flags.DSC) result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream); -#endif if (result == DC_OK) result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); @@ -1528,9 +1639,7 @@ enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ { enum dc_status result = DC_OK; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); -#endif return result; } @@ -1596,7 +1705,7 @@ static void swizzle_to_dml_params( } } -static bool dcn20_split_stream_for_odm( +bool dcn20_split_stream_for_odm( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, @@ -1613,11 +1722,8 @@ static bool dcn20_split_stream_for_odm( next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT next_odm_pipe->stream_res.dsc = NULL; -#endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { - ASSERT(!next_odm_pipe->next_odm_pipe); next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; } @@ -1662,19 +1768,17 @@ static bool dcn20_split_stream_for_odm( sd->recout.x = 0; } next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (next_odm_pipe->stream->timing.flags.DSC == 1) { - acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); + acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); ASSERT(next_odm_pipe->stream_res.dsc); if (next_odm_pipe->stream_res.dsc == NULL) return false; } -#endif return true; } -static void dcn20_split_stream_for_mpc( +void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *primary_pipe, @@ -1693,9 +1797,7 @@ static void dcn20_split_stream_for_mpc( secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT secondary_pipe->stream_res.dsc = NULL; -#endif if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { ASSERT(!secondary_pipe->bottom_pipe); secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; @@ -1747,10 +1849,11 @@ void dcn20_populate_dml_writeback_from_context( } int dcn20_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) { int pipe_cnt, i; bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1760,7 +1863,7 @@ int dcn20_populate_dml_pipes_from_context( pipe_cnt = i; continue; } - if (!resource_are_streams_timing_synchronizable( + if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, res_ctx->pipe_ctx[i].stream)) { synchronized_vblank = false; @@ -1770,25 +1873,26 @@ int dcn20_populate_dml_pipes_from_context( for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing = 
&res_ctx->pipe_ctx[i].stream->timing; + unsigned int v_total; int output_bpc; if (!res_ctx->pipe_ctx[i].stream) continue; + + v_total = timing->v_total; /* todo: pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; pipes[pipe_cnt].pipe.src.dcc = 0; pipes[pipe_cnt].pipe.src.vm = 0;*/ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; /* todo: rotation?*/ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; -#endif if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; /* 1/2 vblank */ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = - (timing->v_total - timing->v_addressable + (v_total - timing->v_addressable - timing->v_border_top - timing->v_border_bottom) / 2; /* 36 bytes dp, 32 hdmi */ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = @@ -1802,13 +1906,13 @@ int dcn20_populate_dml_pipes_from_context( - timing->h_addressable - timing->h_border_left - timing->h_border_right; - pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch; + pipes[pipe_cnt].pipe.dest.vblank_start = v_total - timing->v_front_porch; pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom; pipes[pipe_cnt].pipe.dest.htotal = timing->h_total; - pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; + pipes[pipe_cnt].pipe.dest.vtotal = v_total; pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable; pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable; pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE; @@ -1871,14 +1975,12 @@ int dcn20_populate_dml_pipes_from_context( case COLOR_DEPTH_161616: output_bpc = 16; break; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case COLOR_DEPTH_999: output_bpc = 9; break; case COLOR_DEPTH_111111: output_bpc = 11; break; -#endif default: output_bpc = 8; break; @@ -1892,7 +1994,7 @@ int dcn20_populate_dml_pipes_from_context( break; case PIXEL_ENCODING_YCBCR420: pipes[pipe_cnt].dout.output_format = dm_420; - pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2; + pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; break; case PIXEL_ENCODING_YCBCR422: if (true) /* todo */ @@ -1906,6 +2008,9 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; + /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* @@ -1928,6 +2033,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable; if (pipes[pipe_cnt].pipe.src.viewport_height > 1080) pipes[pipe_cnt].pipe.src.viewport_height = 1080; + pipes[pipe_cnt].pipe.src.surface_height_y = pipes[pipe_cnt].pipe.src.viewport_height; pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ @@ -1942,8 +2048,8 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.scale_taps.vtaps = 1; pipes[pipe_cnt].pipe.src.is_hsplit = 0; pipes[pipe_cnt].pipe.dest.odm_combine = 0; - pipes[pipe_cnt].pipe.dest.vtotal_min = 
timing->v_total; - pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total; + pipes[pipe_cnt].pipe.dest.vtotal_min = v_total; + pipes[pipe_cnt].pipe.dest.vtotal_max = v_total; } else { struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state; struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data; @@ -1961,6 +2067,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width; pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; + pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; @@ -2126,8 +2233,7 @@ void dcn20_set_mcif_arb_params( } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; @@ -2160,9 +2266,8 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } return true; } -#endif -static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) @@ -2202,7 +2307,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { - if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) { + if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { @@ -2238,29 +2344,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, return secondary_pipe; } -bool dcn20_fast_validate_bw( +void dcn20_merge_pipes_for_validate( struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out) + struct dc_state *context) { - bool out = false; - - int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; - bool odm_capable = context->bw_ctx.dml.ip.odm_capable; - bool force_split = false; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - bool failed_non_odm_dsc = false; -#endif - int split_threshold = dc->res_pool->pipe_count / 2; - bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - - - ASSERT(pipes); - if (!pipes) - return false; + int i; /* merge previously split odm pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2280,10 +2368,8 @@ bool dcn20_fast_validate_bw( odm_pipe->bottom_pipe = NULL; odm_pipe->prev_odm_pipe = NULL; odm_pipe->next_odm_pipe = NULL; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT if (odm_pipe->stream_res.dsc) release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); -#endif /* Clear plane_res and stream_res */ memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); @@ -2315,51 +2401,19 @@ bool dcn20_fast_validate_bw( if (pipe->plane_state) resource_build_scaling_params(pipe); } +} - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); - else - pipe_cnt = 
dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); - - *pipe_cnt_out = pipe_cnt; - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - context->bw_ctx.dml.ip.odm_capable = 0; - - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.ip.odm_capable = odm_capable; - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - /* 1 dsc per stream dsc validation */ - if (vlevel <= context->bw_ctx.dml.soc.num_states) - if (!dcn20_validate_dsc(dc, context)) { - failed_non_odm_dsc = true; - vlevel = context->bw_ctx.dml.soc.num_states + 1; - } -#endif - - if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable) - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - if (vlevel > context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold) - || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold)) - context->commit_hints.full_update_needed = true; - - /*initialize pipe_just_split_from to invalid idx*/ - for (i = 0; i < MAX_PIPES; i++) - pipe_split_from[i] = -1; +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + bool *split) +{ + int i, pipe_idx, vlevel_split; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - /* Single display only conditionals get set here */ + /* Single display loop, exits if there is more than one display */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; bool exit_loop = false; @@ -2386,38 +2440,107 @@ bool dcn20_fast_validate_bw( if (exit_loop) break; } - - if (context->stream_count > split_threshold) + /* TODO: fix dc bugs and remove this split threshold thing */ + if (context->stream_count > dc->res_pool->pipe_count / 2) avoid_split = true; - vlevel_unsplit = vlevel; + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + pipe_idx++; + } + context->bw_ctx.dml.vba.maxMpcComb = 0; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!context->res_ctx.pipe_ctx[i].stream) continue; - for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++) - if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1) - break; + + if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1) + split[i] = true; + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = true; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = true; + 
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; + } + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; pipe_idx++; } + return vlevel; +} + +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out) +{ + bool out = false; + bool split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + + ASSERT(pipes); + if (!pipes) + return false; + + dcn20_merge_pipes_for_validate(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes); + + *pipe_cnt_out = pipe_cnt; + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (vlevel > context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split); + + /*initialize pipe_just_split_from to invalid idx*/ + for (i = 0; i < MAX_PIPES; i++) + pipe_split_from[i] = -1; + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - bool need_split = true; - bool need_split3d; if (!pipe->stream || pipe_split_from[i] >= 0) continue; pipe_idx++; - if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { - force_split = true; - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; - } - if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); @@ -2435,40 +2558,26 @@ bool dcn20_fast_validate_bw( if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) continue; - need_split3d = ((pipe->stream->view_format == - VIEW_3D_FORMAT_SIDE_BY_SIDE || - pipe->stream->view_format == - VIEW_3D_FORMAT_TOP_AND_BOTTOM) && - (pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_TOP_AND_BOTTOM || - pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_SIDE_BY_SIDE)); - - if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) { - need_split = false; - vlevel = vlevel_unsplit; - context->bw_ctx.dml.vba.maxMpcComb = 0; - } else - need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2; - /* We do not support mpo + odm at the moment */ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) goto validate_fail; - if (need_split3d || need_split || force_split) { + if (split[i]) { if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { /* pipe not split previously needs split */ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, 
dc->res_pool, pipe); - ASSERT(hsplit_pipe || force_split); - if (!hsplit_pipe) + ASSERT(hsplit_pipe); + if (!hsplit_pipe) { + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2; continue; - + } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); } else dcn20_split_stream_for_mpc( &context->res_ctx, dc->res_pool, @@ -2480,14 +2589,12 @@ bool dcn20_fast_validate_bw( ASSERT(0); } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ - if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) { + if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; } -#endif *vlevel_out = vlevel; @@ -2501,7 +2608,7 @@ validate_out: return out; } -void dcn20_calculate_wm( +static void dcn20_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, @@ -2522,7 +2629,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipe_idx++; @@ -2531,7 +2638,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; } @@ -2551,10 +2658,10 @@ void dcn20_calculate_wm( if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); + context, pipes); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); + context, pipes); } *out_pipe_cnt = pipe_cnt; @@ -2574,6 +2681,9 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; @@ -2585,6 +2695,8 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 
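/*
 * A worked example of the unit bookkeeping in this block (illustrative only,
 * values assumed): the get_wm_* DML helpers return microseconds, so a 40 us
 * stutter-exit watermark is stored as 40 * 1000 = 40000 ns in the *_ns fields
 * here. The hubbub watermark programming later turns that back into refclk
 * cycles, roughly:
 *
 *     prog_wm_value = wm_ns * refclk_mhz / 1000;  // e.g. 40000 * 100 / 1000 = 4000 cycles
 *
 * clamped to the register width (see the convert_and_clamp() calls in the
 * dcn21_hubbub.c hunk further down).
 */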
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; @@ -2596,6 +2708,8 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2605,6 +2719,8 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; } void dcn20_calculate_dlg_params( @@ -2624,7 +2740,7 @@ void dcn20_calculate_dlg_params( context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; - context->bw_ctx.bw.dcn.clk.fclk_khz = 0; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; @@ -2640,8 +2756,8 @@ void dcn20_calculate_dlg_params( continue; if (!visited[pipe_idx]) { - display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src; - display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest; + display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src; + display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest; dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; @@ -2771,6 +2887,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool full_pstate_supported = false; bool dummy_pstate_supported = false; double p_state_latency_us = 
context->bw_ctx.dml.soc.dram_clock_change_latency_us; + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support; if (fast_validate) return dcn20_validate_bandwidth_internal(dc, context, true); @@ -2801,7 +2918,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, ASSERT(false); restore_dml_state: - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; return voltage_supported; @@ -2848,7 +2964,7 @@ static void dcn20_destroy_resource_pool(struct resource_pool **pool) { struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); - destruct(dcn20_pool); + dcn20_resource_destruct(dcn20_pool); kfree(dcn20_pool); *pool = NULL; } @@ -2887,6 +3003,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; @@ -2895,8 +3012,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) int i; uint32_t pipe_count = pool->res_cap->num_dwb; - ASSERT(pipe_count > 0); - for (i = 0; i < pipe_count; i++) { struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), GFP_KERNEL); @@ -2942,7 +3057,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); @@ -2957,7 +3072,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) return pp_smu; } -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -2965,7 +3080,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) } } -static void cap_soc_clocks( +void dcn20_cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks) { @@ -3032,10 +3147,10 @@ static void cap_soc_clocks( } } -static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; + struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; int i; int num_calculated_states = 0; int min_dcfclk = 0; @@ -3043,12 +3158,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (num_states == 0) return; + memset(calculated_states, 0, sizeof(calculated_states)); + if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; - else - // Accounting for SOC/DCF relationship, we can go as high as - // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. - min_dcfclk = 507; + else { + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) + min_dcfclk = 310; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. 
+ min_dcfclk = 506; + } for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; @@ -3088,7 +3209,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ bb->clock_limits[num_calculated_states].state = bb->num_states; } -static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns @@ -3129,6 +3250,10 @@ static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params( uint32_t hw_internal_rev) { + /* NV14 */ + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_ip; + /* NV12 and NV10 */ return &dcn2_0_ip; } @@ -3152,12 +3277,13 @@ static bool init_soc_bounding_box(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - if (!bb && !SOC_BOUNDING_BOX_VALID) { + /* TODO: upstream NV12 bounding box when it's launched */ + if (!bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); return false; } - if (bb && !SOC_BOUNDING_BOX_VALID) { + if (bb && ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { int i; dcn2_0_nv12_soc.sr_exit_time_us = @@ -3287,19 +3413,19 @@ static bool init_soc_bounding_box(struct dc *dc, } if (clock_limits_available && uclk_states_available && num_states) - update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); + dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); else if (clock_limits_available) - cap_soc_clocks(loaded_bb, max_clocks); + dcn20_cap_soc_clocks(loaded_bb, max_clocks); } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; - patch_bounding_box(dc, loaded_bb); + dcn20_patch_bounding_box(dc, loaded_bb); return true; } -static bool construct( +static bool dcn20_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn20_resource_pool *pool) @@ -3307,6 +3433,7 @@ static bool construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data; struct _vcs_dpi_soc_bounding_box_st *loaded_bb = get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); struct _vcs_dpi_ip_params_st *loaded_ip = @@ -3340,6 +3467,7 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.hw_3d_lut = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; @@ -3565,7 +3693,6 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn20_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { @@ -3574,7 +3701,6 @@ static bool construct( goto create_fail; } } -#endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); @@ -3601,11 +3727,22 @@ static bool construct( dc->cap_funcs = cap_funcs; + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = dal_ddc_service_create(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + return true;
create_fail: - destruct(pool); + dcn20_resource_destruct(pool); return false; } @@ -3620,7 +3757,7 @@ struct resource_pool *dcn20_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 44f95aa0d61e..840ca66c34e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -50,7 +50,7 @@ unsigned int dcn20_calc_max_scaled_time( enum mmhubbub_wbif_mode mode, unsigned int urgent_watermark); int dcn20_populate_dml_pipes_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, @@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst); void dcn20_dsc_destroy(struct display_stream_compressor **dsc); -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx); -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); - +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); +void dcn20_cap_soc_clocks( + struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table max_clocks); +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states); struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst); @@ -116,6 +119,29 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +void dcn20_merge_pipes_for_validate( + struct dc *dc, + struct dc_state *context); +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + bool *split); +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); +void dcn20_split_stream_for_mpc( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe); +bool dcn20_split_stream_for_odm( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *prev_odm_pipe, + struct pipe_ctx *next_odm_pipe); +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, + struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *primary_pipe); bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 5ab9d6240498..9b70a1e7b962 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -205,7 +205,6 @@ static void enc2_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC7_LINE, 0); } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Update GSP7 SDP 128 byte long */ static void enc2_update_gsp7_128_info_packet( @@ -360,7 +359,6 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s) REG_GET(DP_SEC_CNTL, 
DP_SEC_STREAM_ENABLE, &s->sec_stream_enable); } } -#endif /* Set Dynamic Metadata-configuration. * enable_dme: TRUE: enables Dynamic Metadata Engine, FALSE: disables DME @@ -440,10 +438,8 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 && !timing->dsc_cfg.ycbcr422_simple); -#endif return two_pix; } @@ -492,15 +488,23 @@ void enc2_stream_encoder_dp_unblank( DP_VID_N_MUL, n_multiply); } - /* set DIG_START to 0x1 to reset FIFO */ + /* make sure stream is disabled before resetting steer fifo */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false); + REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000); + /* set DIG_START to 0x1 to reset FIFO */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(1); /* write 0 to take the FIFO out of reset */ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); - /* switch DP encoder to CRTC data */ + /* switch DP encoder to CRTC data, but reset the fifo first. It may happen + * that it overflows during mode transition, and sometimes doesn't recover. + */ + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1); + udelay(10); REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); @@ -533,11 +537,16 @@ void enc2_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting); + enc1_stream_encoder_dp_set_stream_attribute(enc, + crtc_timing, + output_color_space, + use_vsc_sdp_for_colorimetry, + enable_sdp_splitting); REG_UPDATE(DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, enable_sdp_splitting); @@ -560,6 +569,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc2_stream_encoder_stop_hdmi_info_packets, .update_dp_info_packets = enc2_stream_encoder_update_dp_info_packets, + .send_immediate_sdp_message = + enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, .dp_blank = @@ -578,11 +589,13 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .set_avmute = enc1_stream_encoder_set_avmute, .dig_connect_to_otg = enc1_dig_connect_to_otg, .dig_source_otg = enc1_dig_source_otg, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + + .dp_get_pixel_format = + enc1_stream_encoder_dp_get_pixel_format, + .enc_read_state = enc2_read_state, .dp_set_dsc_config = enc2_dp_set_dsc_config, .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet, -#endif .set_dynamic_metadata = enc2_set_dynamic_metadata, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index 3f94a9f13c4a..d2a805bd4573 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -98,6 +98,7 @@ void enc2_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void enc2_stream_encoder_dp_unblank( diff --git
a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ef673bffc241..4763721fb1c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,17 +1,24 @@ +# SPDX-License-Identifier: MIT # # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o +DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ + dcn21_hwseq.o dcn21_link_encoder.o -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4 +else CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index d1266741763b..f546260c15b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include <linux/delay.h> #include "dm_services.h" #include "dcn20/dcn20_hubbub.h" #include "dcn21_hubbub.h" @@ -51,7 +52,7 @@ #ifdef NUM_VMID #undef NUM_VMID #endif -#define NUM_VMID 1 +#define NUM_VMID 16 static uint32_t convert_and_clamp( uint32_t wm_ns, @@ -71,56 +72,76 @@ static uint32_t convert_and_clamp( void dcn21_dchvm_init(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; //Init DCHVM block REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); //Poll until RIOMMU_ACTIVE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100); + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); - //Reflect the power status of DCHUBBUB - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + if (riommu_active) + break; + else + udelay(5); + } + + if (riommu_active) { + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); - //Start rIOMMU prefetching - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); - // Enable dynamic clock gating - REG_UPDATE_4(DCHVM_CLK_CTRL, - HVM_DISPCLK_R_GATE_DIS, 0, - HVM_DISPCLK_G_GATE_DIS, 0, - HVM_DCFCLK_R_GATE_DIS, 0, - HVM_DCFCLK_G_GATE_DIS, 0); + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); - //Poll until HOSTVM_PREFETCH_DONE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + } } -static int hubbub21_init_dchub(struct hubbub *hubbub, +int hubbub21_init_dchub(struct hubbub *hubbub, struct 
dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } dcn21_dchvm_init(hubbub); return NUM_VMID; } -static void hubbub21_program_urgent_watermarks( +void hubbub21_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { + hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { @@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { + hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } + /* clock state C */ if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; @@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { + hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + 
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } + /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; @@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); } + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { + hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } } -static void hubbub21_program_stutter_watermarks( +void hubbub21_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks( } } -static void hubbub21_program_pstate_watermarks( +void hubbub21_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); } +void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); +} static const struct hubbub_funcs hubbub21_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub21_init_dchub, - .init_vm_ctx = NULL, + .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub2_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub21_program_watermarks, + .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa, }; void hubbub21_construct(struct dcn20_hubbub *hubbub, @@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index 6ff3cdb89178..c4840dfb1fa5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -36,6 +36,10 @@ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ SR(DCHVM_CTRL0), \ SR(DCHVM_MEM_CTRL), \ @@ -44,16 +48,9 @@ SR(DCHVM_RIOMMU_STAT0) #define HUBBUB_REG_LIST_DCN21()\ - HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_REG_LIST_DCN20_COMMON(), \ HUBBUB_SR_WATERMARK_REG_LIST(), \ - HUBBUB_HVM_REG_LIST(), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE),\ - SR(DCN_VM_FB_LOCATION_TOP),\ - SR(DCN_VM_FB_OFFSET),\ - SR(DCN_VM_AGP_BOT),\ - SR(DCN_VM_AGP_TOP),\ - SR(DCN_VM_AGP_BASE) + 
HUBBUB_HVM_REG_LIST() #define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \ @@ -102,7 +99,7 @@ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh) #define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\ - HUBBUB_MASK_SH_LIST_HVM(mask_sh),\ + HUBBUB_MASK_SH_LIST_HVM(mask_sh), \ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -114,11 +111,28 @@ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh) void dcn21_dchvm_init(struct hubbub *hubbub); +int hubbub21_init_dchub(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); void hubbub21_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); +void hubbub21_program_urgent_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_stutter_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_pstate_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); void hubbub21_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index a00af513aa2b..332bf3d3a664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -22,11 +22,15 @@ * Authors: AMD * */ + +#include "dcn10/dcn10_hubp.h" #include "dcn21_hubp.h" #include "dm_services.h" #include "reg_helper.h" +#define DC_LOGGER_INIT(logger) + #define REG(reg)\ hubp21->hubp_regs->reg @@ -162,6 +166,72 @@ static void hubp21_setup( } +void hubp21_set_viewport( + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c, + enum dc_rotation_angle rotation) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + int patched_viewport_height = 0; + struct dc_debug_options *debug = &hubp->ctx->dc->debug; + + REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0, + PRI_VIEWPORT_WIDTH, viewport->width, + PRI_VIEWPORT_HEIGHT, viewport->height); + + REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0, + PRI_VIEWPORT_X_START, viewport->x, + PRI_VIEWPORT_Y_START, viewport->y); + + /*for stereo*/ + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0, + SEC_VIEWPORT_WIDTH, viewport->width, + SEC_VIEWPORT_HEIGHT, viewport->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0, + SEC_VIEWPORT_X_START, viewport->x, + SEC_VIEWPORT_Y_START, viewport->y); + + /* + * Work around for underflow issue with NV12 + rIOMMU translation + * + immediate flip. 
This will cause hubp underflow, but will not + * be user visible since underflow is in blank region + * Disable w/a when rotated 180 degrees, causes vertical chroma offset + */ + patched_viewport_height = viewport_c->height; + if (debug->nv12_iflip_vm_wa && viewport_c->height > 512 && + rotation != ROTATION_ANGLE_180) { + int pte_row_height = 0; + int pte_rows = 0; + + REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, + PTE_ROW_HEIGHT_LINEAR_C, &pte_row_height); + + pte_row_height = 1 << (pte_row_height + 3); + pte_rows = (viewport_c->height / pte_row_height) + 1; + patched_viewport_height = pte_rows * pte_row_height + 1; + } + + + /* DC supports NV12 only at the moment */ + REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0, + PRI_VIEWPORT_WIDTH_C, viewport_c->width, + PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); + + REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, + PRI_VIEWPORT_X_START_C, viewport_c->x, + PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, patched_viewport_height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); +} + void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, struct vm_system_aperture_param *apt) { @@ -189,6 +259,349 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, SYSTEM_ACCESS_MODE, 0x3); } +void hubp21_validate_dml_output(struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr) +{ + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); + struct _vcs_dpi_display_rq_regs_st rq_regs = {0}; + struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0}; + struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0}; + DC_LOGGER_INIT(ctx->logger); + DC_LOG_DEBUG("DML Validation | Running Validation"); + + /* Requester - Per hubp */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs.plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs.drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs.prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs.mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs.crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs.rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs.rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs.rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs.rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs.rq_regs_l.dpte_group_size, + VM_GROUP_SIZE, &rq_regs.rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs.rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs.rq_regs_l.pte_row_height_linear); + REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs.rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs.rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs.rq_regs_c.dpte_group_size, + SWATH_HEIGHT_C, &rq_regs.rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs.rq_regs_c.pte_row_height_linear); + + if (rq_regs.plane1_base_address != dml_rq_regs->plane1_base_address) + DC_LOG_DEBUG("DML Validation | HUBPRET_CONTROL:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->plane1_base_address, rq_regs.plane1_base_address); + if (rq_regs.drq_expansion_mode != dml_rq_regs->drq_expansion_mode) 
+ DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->drq_expansion_mode, rq_regs.drq_expansion_mode); + if (rq_regs.prq_expansion_mode != dml_rq_regs->prq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:MRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->prq_expansion_mode, rq_regs.prq_expansion_mode); + if (rq_regs.mrq_expansion_mode != dml_rq_regs->mrq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:DET_BUF_PLANE1_BASE_ADDRESS - Expected: %u Actual: %u\n", + dml_rq_regs->mrq_expansion_mode, rq_regs.mrq_expansion_mode); + if (rq_regs.crq_expansion_mode != dml_rq_regs->crq_expansion_mode) + DC_LOG_DEBUG("DML Validation | DCN_EXPANSION_MODE:CRQ_EXPANSION_MODE - Expected: %u Actual: %u\n", + dml_rq_regs->crq_expansion_mode, rq_regs.crq_expansion_mode); + + if (rq_regs.rq_regs_l.chunk_size != dml_rq_regs->rq_regs_l.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.chunk_size, rq_regs.rq_regs_l.chunk_size); + if (rq_regs.rq_regs_l.min_chunk_size != dml_rq_regs->rq_regs_l.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_chunk_size, rq_regs.rq_regs_l.min_chunk_size); + if (rq_regs.rq_regs_l.meta_chunk_size != dml_rq_regs->rq_regs_l.meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.meta_chunk_size, rq_regs.rq_regs_l.meta_chunk_size); + if (rq_regs.rq_regs_l.min_meta_chunk_size != dml_rq_regs->rq_regs_l.min_meta_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs.rq_regs_l.min_meta_chunk_size); + if (rq_regs.rq_regs_l.dpte_group_size != dml_rq_regs->rq_regs_l.dpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.dpte_group_size, rq_regs.rq_regs_l.dpte_group_size); + if (rq_regs.rq_regs_l.mpte_group_size != dml_rq_regs->rq_regs_l.mpte_group_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:VM_GROUP_SIZE - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.mpte_group_size, rq_regs.rq_regs_l.mpte_group_size); + if (rq_regs.rq_regs_l.swath_height != dml_rq_regs->rq_regs_l.swath_height) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.swath_height, rq_regs.rq_regs_l.swath_height); + if (rq_regs.rq_regs_l.pte_row_height_linear != dml_rq_regs->rq_regs_l.pte_row_height_linear) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_l.pte_row_height_linear, rq_regs.rq_regs_l.pte_row_height_linear); + + if (rq_regs.rq_regs_c.chunk_size != dml_rq_regs->rq_regs_c.chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.chunk_size, rq_regs.rq_regs_c.chunk_size); + if (rq_regs.rq_regs_c.min_chunk_size != dml_rq_regs->rq_regs_c.min_chunk_size) + DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG:MIN_CHUNK_SIZE_C - Expected: %u Actual: %u\n", + dml_rq_regs->rq_regs_c.min_chunk_size, rq_regs.rq_regs_c.min_chunk_size); + if (rq_regs.rq_regs_c.meta_chunk_size != 
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.meta_chunk_size, rq_regs.rq_regs_c.meta_chunk_size);
+	if (rq_regs.rq_regs_c.min_meta_chunk_size != dml_rq_regs->rq_regs_c.min_meta_chunk_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:MIN_META_CHUNK_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.min_meta_chunk_size, rq_regs.rq_regs_c.min_meta_chunk_size);
+	if (rq_regs.rq_regs_c.dpte_group_size != dml_rq_regs->rq_regs_c.dpte_group_size)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:DPTE_GROUP_SIZE_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.dpte_group_size, rq_regs.rq_regs_c.dpte_group_size);
+	if (rq_regs.rq_regs_c.swath_height != dml_rq_regs->rq_regs_c.swath_height)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:SWATH_HEIGHT_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.swath_height, rq_regs.rq_regs_c.swath_height);
+	if (rq_regs.rq_regs_c.pte_row_height_linear != dml_rq_regs->rq_regs_c.pte_row_height_linear)
+		DC_LOG_DEBUG("DML Validation | DCHUBP_REQ_SIZE_CONFIG_C:PTE_ROW_HEIGHT_LINEAR_C - Expected: %u Actual: %u\n",
+				dml_rq_regs->rq_regs_c.pte_row_height_linear, rq_regs.rq_regs_c.pte_row_height_linear);
+
+	/* DLG - Per hubp */
+	REG_GET_2(BLANK_OFFSET_0,
+		REFCYC_H_BLANK_END, &dlg_attr.refcyc_h_blank_end,
+		DLG_V_BLANK_END, &dlg_attr.dlg_vblank_end);
+	REG_GET(BLANK_OFFSET_1,
+		MIN_DST_Y_NEXT_START, &dlg_attr.min_dst_y_next_start);
+	REG_GET(DST_DIMENSIONS,
+		REFCYC_PER_HTOTAL, &dlg_attr.refcyc_per_htotal);
+	REG_GET_2(DST_AFTER_SCALER,
+		REFCYC_X_AFTER_SCALER, &dlg_attr.refcyc_x_after_scaler,
+		DST_Y_AFTER_SCALER, &dlg_attr.dst_y_after_scaler);
+	REG_GET(REF_FREQ_TO_PIX_FREQ,
+		REF_FREQ_TO_PIX_FREQ, &dlg_attr.ref_freq_to_pix_freq);
+
+	if (dlg_attr.refcyc_h_blank_end != dml_dlg_attr->refcyc_h_blank_end)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:REFCYC_H_BLANK_END - Expected: %u Actual: %u\n",
+				dml_dlg_attr->refcyc_h_blank_end, dlg_attr.refcyc_h_blank_end);
+	if (dlg_attr.dlg_vblank_end != dml_dlg_attr->dlg_vblank_end)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_0:DLG_V_BLANK_END - Expected: %u Actual: %u\n",
+				dml_dlg_attr->dlg_vblank_end, dlg_attr.dlg_vblank_end);
+	if (dlg_attr.min_dst_y_next_start != dml_dlg_attr->min_dst_y_next_start)
+		DC_LOG_DEBUG("DML Validation | BLANK_OFFSET_1:MIN_DST_Y_NEXT_START - Expected: %u Actual: %u\n",
+				dml_dlg_attr->min_dst_y_next_start, dlg_attr.min_dst_y_next_start);
+	if (dlg_attr.refcyc_per_htotal != dml_dlg_attr->refcyc_per_htotal)
+		DC_LOG_DEBUG("DML Validation | DST_DIMENSIONS:REFCYC_PER_HTOTAL - Expected: %u Actual: %u\n",
+				dml_dlg_attr->refcyc_per_htotal, dlg_attr.refcyc_per_htotal);
+	if (dlg_attr.refcyc_x_after_scaler != dml_dlg_attr->refcyc_x_after_scaler)
+		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:REFCYC_X_AFTER_SCALER - Expected: %u Actual: %u\n",
+				dml_dlg_attr->refcyc_x_after_scaler, dlg_attr.refcyc_x_after_scaler);
+	if (dlg_attr.dst_y_after_scaler != dml_dlg_attr->dst_y_after_scaler)
+		DC_LOG_DEBUG("DML Validation | DST_AFTER_SCALER:DST_Y_AFTER_SCALER - Expected: %u Actual: %u\n",
+				dml_dlg_attr->dst_y_after_scaler, dlg_attr.dst_y_after_scaler);
+	if (dlg_attr.ref_freq_to_pix_freq != dml_dlg_attr->ref_freq_to_pix_freq)
+		DC_LOG_DEBUG("DML Validation | REF_FREQ_TO_PIX_FREQ:REF_FREQ_TO_PIX_FREQ - Expected: %u Actual: %u\n",
+				dml_dlg_attr->ref_freq_to_pix_freq, dlg_attr.ref_freq_to_pix_freq);
+
+	/* DLG - Per luma/chroma */
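+	/*
+	 * Every REG_GET*() above and below resolves a field through a
+	 * per-field shift/mask pair (see the FN() definitions elsewhere in
+	 * this patch, e.g. in dcn21_hwseq.c). A minimal sketch of the
+	 * single-field case -- the real helpers also resolve the
+	 * per-instance register offset and have multi-field variants:
+	 *
+	 *	uint32_t reg_val = dm_read_reg(ctx, addr);
+	 *	*field_val = (reg_val & field_mask) >> field_shift;
+	 */
+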
REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr.refcyc_per_pte_group_vblank_l); + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr.dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr.refcyc_per_pte_group_nom_l); + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr.dst_y_per_meta_row_nom_l); + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr.refcyc_per_meta_chunk_nom_l); + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr.refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr.refcyc_per_line_delivery_c); + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr.refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr.refcyc_per_line_delivery_pre_c); + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr.refcyc_per_pte_group_vblank_c); + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr.dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr.refcyc_per_pte_group_nom_c); + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr.dst_y_per_meta_row_nom_c); + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr.refcyc_per_meta_chunk_nom_c); + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr.refcyc_per_meta_chunk_vblank_l); + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr.refcyc_per_meta_chunk_vblank_c); + + if (dlg_attr.refcyc_per_pte_group_vblank_l != dml_dlg_attr->refcyc_per_pte_group_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_1:REFCYC_PER_PTE_GROUP_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_l, dlg_attr.refcyc_per_pte_group_vblank_l); + if (dlg_attr.dst_y_per_pte_row_nom_l != dml_dlg_attr->dst_y_per_pte_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_0:DST_Y_PER_PTE_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_l, dlg_attr.dst_y_per_pte_row_nom_l); + if (dlg_attr.refcyc_per_pte_group_nom_l != dml_dlg_attr->refcyc_per_pte_group_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_1:REFCYC_PER_PTE_GROUP_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_l, dlg_attr.refcyc_per_pte_group_nom_l); + if (dlg_attr.dst_y_per_meta_row_nom_l != dml_dlg_attr->dst_y_per_meta_row_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_4:DST_Y_PER_META_ROW_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_l, dlg_attr.dst_y_per_meta_row_nom_l); + if (dlg_attr.refcyc_per_meta_chunk_nom_l != dml_dlg_attr->refcyc_per_meta_chunk_nom_l) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_5:REFCYC_PER_META_CHUNK_NOM_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_l, dlg_attr.refcyc_per_meta_chunk_nom_l); + if (dlg_attr.refcyc_per_line_delivery_l != dml_dlg_attr->refcyc_per_line_delivery_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_l, dlg_attr.refcyc_per_line_delivery_l); + if (dlg_attr.refcyc_per_line_delivery_c != dml_dlg_attr->refcyc_per_line_delivery_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY:REFCYC_PER_LINE_DELIVERY_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_c, 
dlg_attr.refcyc_per_line_delivery_c); + if (dlg_attr.refcyc_per_pte_group_vblank_c != dml_dlg_attr->refcyc_per_pte_group_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_2:REFCYC_PER_PTE_GROUP_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_vblank_c, dlg_attr.refcyc_per_pte_group_vblank_c); + if (dlg_attr.dst_y_per_pte_row_nom_c != dml_dlg_attr->dst_y_per_pte_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_2:DST_Y_PER_PTE_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_pte_row_nom_c, dlg_attr.dst_y_per_pte_row_nom_c); + if (dlg_attr.refcyc_per_pte_group_nom_c != dml_dlg_attr->refcyc_per_pte_group_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_3:REFCYC_PER_PTE_GROUP_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_nom_c, dlg_attr.refcyc_per_pte_group_nom_c); + if (dlg_attr.dst_y_per_meta_row_nom_c != dml_dlg_attr->dst_y_per_meta_row_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_6:DST_Y_PER_META_ROW_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->dst_y_per_meta_row_nom_c, dlg_attr.dst_y_per_meta_row_nom_c); + if (dlg_attr.refcyc_per_meta_chunk_nom_c != dml_dlg_attr->refcyc_per_meta_chunk_nom_c) + DC_LOG_DEBUG("DML Validation | NOM_PARAMETERS_7:REFCYC_PER_META_CHUNK_NOM_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_nom_c, dlg_attr.refcyc_per_meta_chunk_nom_c); + if (dlg_attr.refcyc_per_line_delivery_pre_l != dml_dlg_attr->refcyc_per_line_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_l, dlg_attr.refcyc_per_line_delivery_pre_l); + if (dlg_attr.refcyc_per_line_delivery_pre_c != dml_dlg_attr->refcyc_per_line_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | PER_LINE_DELIVERY_PRE:REFCYC_PER_LINE_DELIVERY_PRE_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_line_delivery_pre_c, dlg_attr.refcyc_per_line_delivery_pre_c); + if (dlg_attr.refcyc_per_meta_chunk_vblank_l != dml_dlg_attr->refcyc_per_meta_chunk_vblank_l) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_3:REFCYC_PER_META_CHUNK_VBLANK_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_l, dlg_attr.refcyc_per_meta_chunk_vblank_l); + if (dlg_attr.refcyc_per_meta_chunk_vblank_c != dml_dlg_attr->refcyc_per_meta_chunk_vblank_c) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_4:REFCYC_PER_META_CHUNK_VBLANK_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_vblank_c, dlg_attr.refcyc_per_meta_chunk_vblank_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr.qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr.qos_level_high_wm); + + if (ttu_attr.qos_level_low_wm != dml_ttu_attr->qos_level_low_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_LOW_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_low_wm, ttu_attr.qos_level_low_wm); + if (ttu_attr.qos_level_high_wm != dml_ttu_attr->qos_level_high_wm) + DC_LOG_DEBUG("DML Validation | DCN_TTU_QOS_WM:QoS_LEVEL_HIGH_WM - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_high_wm, ttu_attr.qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_l); + REG_GET_3(DCN_SURF1_TTU_CNTL0, + 
REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_c); + REG_GET_3(DCN_CUR0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr.refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, &ttu_attr.qos_level_fixed_cur0, + QoS_RAMP_DISABLE, &ttu_attr.qos_ramp_disable_cur0); + REG_GET(FLIP_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_FLIP_L, &dlg_attr.refcyc_per_pte_group_flip_l); + REG_GET(DCN_CUR0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur0); + REG_GET(DCN_CUR1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_cur1); + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_l); + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, &ttu_attr.refcyc_per_req_delivery_pre_c); + + if (ttu_attr.refcyc_per_req_delivery_l != dml_ttu_attr->refcyc_per_req_delivery_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_l, ttu_attr.refcyc_per_req_delivery_l); + if (ttu_attr.qos_level_fixed_l != dml_ttu_attr->qos_level_fixed_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_l, ttu_attr.qos_level_fixed_l); + if (ttu_attr.qos_ramp_disable_l != dml_ttu_attr->qos_ramp_disable_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_l, ttu_attr.qos_ramp_disable_l); + if (ttu_attr.refcyc_per_req_delivery_c != dml_ttu_attr->refcyc_per_req_delivery_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_c, ttu_attr.refcyc_per_req_delivery_c); + if (ttu_attr.qos_level_fixed_c != dml_ttu_attr->qos_level_fixed_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_c, ttu_attr.qos_level_fixed_c); + if (ttu_attr.qos_ramp_disable_c != dml_ttu_attr->qos_ramp_disable_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_c, ttu_attr.qos_ramp_disable_c); + if (ttu_attr.refcyc_per_req_delivery_cur0 != dml_ttu_attr->refcyc_per_req_delivery_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:REFCYC_PER_REQ_DELIVERY - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_cur0, ttu_attr.refcyc_per_req_delivery_cur0); + if (ttu_attr.qos_level_fixed_cur0 != dml_ttu_attr->qos_level_fixed_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_LEVEL_FIXED - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_level_fixed_cur0, ttu_attr.qos_level_fixed_cur0); + if (ttu_attr.qos_ramp_disable_cur0 != dml_ttu_attr->qos_ramp_disable_cur0) + DC_LOG_DEBUG("DML Validation | DCN_CUR0_TTU_CNTL0:QoS_RAMP_DISABLE - Expected: %u Actual: %u\n", + dml_ttu_attr->qos_ramp_disable_cur0, ttu_attr.qos_ramp_disable_cur0); + if (dlg_attr.refcyc_per_pte_group_flip_l != dml_dlg_attr->refcyc_per_pte_group_flip_l) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_1:REFCYC_PER_PTE_GROUP_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_l, dlg_attr.refcyc_per_pte_group_flip_l); + if (ttu_attr.refcyc_per_req_delivery_pre_cur0 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur0) + DC_LOG_DEBUG("DML Validation 
| DCN_CUR0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur0, ttu_attr.refcyc_per_req_delivery_pre_cur0); + if (ttu_attr.refcyc_per_req_delivery_pre_cur1 != dml_ttu_attr->refcyc_per_req_delivery_pre_cur1) + DC_LOG_DEBUG("DML Validation | DCN_CUR1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_cur1, ttu_attr.refcyc_per_req_delivery_pre_cur1); + if (ttu_attr.refcyc_per_req_delivery_pre_l != dml_ttu_attr->refcyc_per_req_delivery_pre_l) + DC_LOG_DEBUG("DML Validation | DCN_SURF0_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_l, ttu_attr.refcyc_per_req_delivery_pre_l); + if (ttu_attr.refcyc_per_req_delivery_pre_c != dml_ttu_attr->refcyc_per_req_delivery_pre_c) + DC_LOG_DEBUG("DML Validation | DCN_SURF1_TTU_CNTL1:REFCYC_PER_REQ_DELIVERY_PRE - Expected: %u Actual: %u\n", + dml_ttu_attr->refcyc_per_req_delivery_pre_c, ttu_attr.refcyc_per_req_delivery_pre_c); + + /* Host VM deadline regs */ + REG_GET(VBLANK_PARAMETERS_5, + REFCYC_PER_VM_GROUP_VBLANK, &dlg_attr.refcyc_per_vm_group_vblank); + REG_GET(VBLANK_PARAMETERS_6, + REFCYC_PER_VM_REQ_VBLANK, &dlg_attr.refcyc_per_vm_req_vblank); + REG_GET(FLIP_PARAMETERS_3, + REFCYC_PER_VM_GROUP_FLIP, &dlg_attr.refcyc_per_vm_group_flip); + REG_GET(FLIP_PARAMETERS_4, + REFCYC_PER_VM_REQ_FLIP, &dlg_attr.refcyc_per_vm_req_flip); + REG_GET(FLIP_PARAMETERS_5, + REFCYC_PER_PTE_GROUP_FLIP_C, &dlg_attr.refcyc_per_pte_group_flip_c); + REG_GET(FLIP_PARAMETERS_6, + REFCYC_PER_META_CHUNK_FLIP_C, &dlg_attr.refcyc_per_meta_chunk_flip_c); + REG_GET(FLIP_PARAMETERS_2, + REFCYC_PER_META_CHUNK_FLIP_L, &dlg_attr.refcyc_per_meta_chunk_flip_l); + + if (dlg_attr.refcyc_per_vm_group_vblank != dml_dlg_attr->refcyc_per_vm_group_vblank) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_5:REFCYC_PER_VM_GROUP_VBLANK - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_group_vblank, dlg_attr.refcyc_per_vm_group_vblank); + if (dlg_attr.refcyc_per_vm_req_vblank != dml_dlg_attr->refcyc_per_vm_req_vblank) + DC_LOG_DEBUG("DML Validation | VBLANK_PARAMETERS_6:REFCYC_PER_VM_REQ_VBLANK - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_req_vblank, dlg_attr.refcyc_per_vm_req_vblank); + if (dlg_attr.refcyc_per_vm_group_flip != dml_dlg_attr->refcyc_per_vm_group_flip) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_3:REFCYC_PER_VM_GROUP_FLIP - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_group_flip, dlg_attr.refcyc_per_vm_group_flip); + if (dlg_attr.refcyc_per_vm_req_flip != dml_dlg_attr->refcyc_per_vm_req_flip) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_4:REFCYC_PER_VM_REQ_FLIP - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_vm_req_flip, dlg_attr.refcyc_per_vm_req_flip); + if (dlg_attr.refcyc_per_pte_group_flip_c != dml_dlg_attr->refcyc_per_pte_group_flip_c) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_5:REFCYC_PER_PTE_GROUP_FLIP_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_pte_group_flip_c, dlg_attr.refcyc_per_pte_group_flip_c); + if (dlg_attr.refcyc_per_meta_chunk_flip_c != dml_dlg_attr->refcyc_per_meta_chunk_flip_c) + DC_LOG_DEBUG("DML Validation | FLIP_PARAMETERS_6:REFCYC_PER_META_CHUNK_FLIP_C - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_flip_c, dlg_attr.refcyc_per_meta_chunk_flip_c); + if (dlg_attr.refcyc_per_meta_chunk_flip_l != dml_dlg_attr->refcyc_per_meta_chunk_flip_l) + DC_LOG_DEBUG("DML Validation | 
FLIP_PARAMETERS_2:REFCYC_PER_META_CHUNK_FLIP_L - Expected: %u Actual: %u\n", + dml_dlg_attr->refcyc_per_meta_chunk_flip_l, dlg_attr.refcyc_per_meta_chunk_flip_l); +} + void hubp21_init(struct hubp *hubp) { // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta @@ -202,14 +615,14 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, - .hubp_program_surface_config = hubp2_program_surface_config, + .hubp_program_surface_config = hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp21_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings, .set_blank = hubp1_set_blank, .dcc_control = hubp1_dcc_control, - .mem_program_viewport = min_set_viewport, + .mem_program_viewport = hubp21_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, .set_cursor_position = hubp1_cursor_set_position, .hubp_clk_cntl = hubp1_clk_cntl, @@ -221,6 +634,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_clear_underflow = hubp1_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp21_init, + .validate_dml_output = hubp21_validate_dml_output, }; bool hubp21_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c new file mode 100644 index 000000000000..081ad8e43d58 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -0,0 +1,114 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dm_helpers.h" +#include "core_types.h" +#include "resource.h" +#include "dce/dce_hwseq.h" +#include "dcn21_hwseq.h" +#include "vmid.h" +#include "reg_helper.h" +#include "hw/clk_mgr.h" + + +#define DC_LOGGER_INIT(logger) + +#define CTX \ + hws->ctx +#define REG(reg)\ + hws->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hws->shifts->field_name, hws->masks->field_name + +/* Temporary read settings, future will get values from kmd directly */ +static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config, + struct dce_hwseq *hws) +{ + uint32_t page_table_base_hi; + uint32_t page_table_base_lo; + + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi); + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo); + + config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo; + +} + +int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +{ + struct dcn_hubbub_phys_addr_config config; + + config.system_aperture.fb_top = pa_config->system_aperture.fb_top; + config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; + config.system_aperture.fb_base = pa_config->system_aperture.fb_base; + config.system_aperture.agp_top = pa_config->system_aperture.agp_top; + config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; + config.system_aperture.agp_base = pa_config->system_aperture.agp_base; + config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; + config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; + config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + + mmhub_update_page_table_config(&config, hws); + + return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); +} + +// work around for Renoir s0i3, if register is programmed, bypass golden init. + +bool dcn21_s0i3_golden_init_wa(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t value = 0; + + value = REG_READ(MICROSECOND_TIME_BASE_DIV); + + return value != 0x00120464; +} + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); +} + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); +} + diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h new file mode 100644 index 000000000000..182736096123 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -0,0 +1,47 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN21_H__ +#define __DC_HWSS_DCN21_H__ + +#include "hw_sequencer_private.h" + +struct dc; + +int dcn21_init_sys_ctx(struct dce_hwseq *hws, + struct dc *dc, + struct dc_phy_addr_space_config *pa_config); + +bool dcn21_s0i3_golden_init_wa(struct dc *dc); + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context); + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context); + +#endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c new file mode 100644 index 000000000000..4861aa5c59ae --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -0,0 +1,142 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn21_funcs = {
+	.program_gamut_remap = dcn10_program_gamut_remap,
+	.init_hw = dcn10_init_hw,
+	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+	.apply_ctx_for_surface = NULL,
+	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+	.update_plane_addr = dcn20_update_plane_addr,
+	.update_dchub = dcn10_update_dchub,
+	.update_pending_status = dcn10_update_pending_status,
+	.program_output_csc = dcn20_program_output_csc,
+	.enable_accelerated_mode = dce110_enable_accelerated_mode,
+	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
+	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+	.update_info_frame = dce110_update_info_frame,
+	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+	.enable_stream = dcn20_enable_stream,
+	.disable_stream = dce110_disable_stream,
+	.unblank_stream = dcn20_unblank_stream,
+	.blank_stream = dce110_blank_stream,
+	.enable_audio_stream = dce110_enable_audio_stream,
+	.disable_audio_stream = dce110_disable_audio_stream,
+	.disable_plane = dcn20_disable_plane,
+	.pipe_control_lock = dcn20_pipe_control_lock,
+	.pipe_control_lock_global = dcn20_pipe_control_lock_global,
+	.prepare_bandwidth = dcn20_prepare_bandwidth,
+	.optimize_bandwidth = dcn20_optimize_bandwidth,
+	.update_bandwidth = dcn20_update_bandwidth,
+	.set_drr = dcn10_set_drr,
+	.get_position = dcn10_get_position,
+	.set_static_screen_control = dcn10_set_static_screen_control,
+	.setup_stereo = dcn10_setup_stereo,
+	.set_avmute = dce110_set_avmute,
+	.log_hw_state = dcn10_log_hw_state,
+	.get_hw_state = dcn10_get_hw_state,
+	.clear_status_bits = dcn10_clear_status_bits,
+	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+	.edp_power_control = dce110_edp_power_control,
+	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+	.set_cursor_position = dcn10_set_cursor_position,
+	.set_cursor_attribute = dcn10_set_cursor_attribute,
+	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+	.set_clock = dcn10_set_clock,
+	.get_clock = dcn10_get_clock,
+	.program_triplebuffer = dcn20_program_triple_buffer,
+	.enable_writeback = dcn20_enable_writeback,
+	.disable_writeback = dcn20_disable_writeback,
+	.dmdata_status_done = dcn20_dmdata_status_done,
+	.program_dmdata_engine = dcn20_program_dmdata_engine,
+	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
+	.init_sys_ctx = dcn21_init_sys_ctx,
+	.init_vm_ctx = dcn20_init_vm_ctx,
+	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
+	.optimize_pwr_state = dcn21_optimize_pwr_state,
+	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+};
+
+static const struct hwseq_private_funcs dcn21_private_funcs = {
+	.init_pipes = dcn10_init_pipes,
+	.update_plane_addr = dcn20_update_plane_addr,
+	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+	.update_mpcc = dcn20_update_mpcc,
+	.set_input_transfer_func = dcn20_set_input_transfer_func,
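+	/*
+	 * dcn21_funcs and dcn21_private_funcs are plain function-pointer
+	 * tables: core DC calls the hooks without knowing which DCN
+	 * generation filled them in, which is how dcn21 mixes dcn10/dcn20
+	 * helpers with its own entries. A sketch of a typical call site
+	 * (hypothetical caller, for illustration only):
+	 *
+	 *	if (dc->hwss.optimize_pwr_state)
+	 *		dc->hwss.optimize_pwr_state(dc, dc->current_state);
+	 */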
+	.set_output_transfer_func = dcn20_set_output_transfer_func,
+	.power_down = dce110_power_down,
+	.enable_display_power_gating = dcn10_dummy_display_power_gating,
+	.blank_pixel_data = dcn20_blank_pixel_data,
+	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+	.enable_stream_timing = dcn20_enable_stream_timing,
+	.edp_backlight_control = dce110_edp_backlight_control,
+	.disable_stream_gating = dcn20_disable_stream_gating,
+	.enable_stream_gating = dcn20_enable_stream_gating,
+	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+	.did_underflow_occur = dcn10_did_underflow_occur,
+	.init_blank = dcn20_init_blank,
+	.disable_vga = dcn20_disable_vga,
+	.bios_golden_init = dcn10_bios_golden_init,
+	.plane_atomic_disable = dcn20_plane_atomic_disable,
+	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+	.enable_power_gating_plane = dcn20_enable_power_gating_plane,
+	.dpp_pg_control = dcn20_dpp_pg_control,
+	.hubp_pg_control = dcn20_hubp_pg_control,
+	.update_odm = dcn20_update_odm,
+	.dsc_pg_control = dcn20_dsc_pg_control,
+	.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+	.get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+	.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa,
+	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
+	.dccg_init = dcn20_dccg_init,
+	.set_blend_lut = dcn20_set_blend_lut,
+	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+	dc->hwss = dcn21_funcs;
+	dc->hwseq->funcs = dcn21_private_funcs;
+
+	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+		dc->hwss.init_hw = dcn20_fpga_init_hw;
+		dc->hwseq->funcs.init_pipes = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h new file mode 100644 index 000000000000..3ed24292648a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h @@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN21_INIT_H__ +#define __DC_DCN21_INIT_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c new file mode 100644 index 000000000000..e45683ac871a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -0,0 +1,468 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include <linux/delay.h> +#include "core_types.h" +#include "link_encoder.h" +#include "dcn21_link_encoder.h" +#include "stream_encoder.h" + +#include "i2caux_interface.h" +#include "dc_bios_types.h" + +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + +#define IND_REG(index) \ + (enc10->link_regs->index) + +static struct mpll_cfg dcn21_mpll_cfg_ref[] = { + // RBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 238, + .mpllb_fracn_en = 0, + .mpllb_fracn_quot = 0, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 44237, + .mpllb_ssc_stepsize = 59454, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 2, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 2, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + // HBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 1, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 
0, + }, + //HBR2 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR3 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 304, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 49152, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 55296, + .mpllb_ssc_stepsize = 74318, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 1, + .mpllb_ana_cp_int = 7, + .mpllb_ana_cp_prop = 16, + .hdmi_pixel_clk_div = 0, + }, +}; + + +static bool update_cfg_data( + struct dcn10_link_encoder *enc10, + const struct dc_link_settings *link_settings, + struct dpcssys_phy_seq_cfg *cfg) +{ + int i; + + cfg->load_sram_fw = false; + cfg->use_calibration_setting = true; + + //TODO: need to implement a proper lane mapping for Renoir. + for (i = 0; i < 4; i++) + cfg->lane_en[i] = true; + + switch (link_settings->link_rate) { + case LINK_RATE_LOW: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[0]; + break; + case LINK_RATE_HIGH: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[1]; + break; + case LINK_RATE_HIGH2: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[2]; + break; + case LINK_RATE_HIGH3: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[3]; + break; + default: + DC_LOG_ERROR("%s: No supported link rate found %X!\n", + __func__, link_settings->link_rate); + return false; + } + + return true; +} + +void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value); + + if (!value && link_settings->lane_count > LANE_COUNT_TWO) + link_settings->lane_count = LANE_COUNT_TWO; +} + +bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value); + + // if value == 1 alt mode is disabled, otherwise it is enabled + return !value; +} + +bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + int value; + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + + if (value == 1) { + ASSERT(0); + return false; + } + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 0); + + udelay(40); + + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + if (value == 1) { + ASSERT(0); + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + return false; + } + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1); + + return true; +} + + + +static void dcn21_link_encoder_release_phy(struct link_encoder *enc) +{ + 
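	/*
+	 * Counterpart to dcn21_link_encoder_acquire_phy() above: on USB-C
+	 * (DP alt mode) PHYs, acquire bails out if RDPCS_PHY_DPALT_DISABLE
+	 * is set, clears the DISABLE_ACK handshake bit to claim the PHY,
+	 * waits 40us and re-checks DPALT_DISABLE before enabling the DP
+	 * reference clock; release below re-asserts the ACK and drops the
+	 * reference clock again.
+	 */
+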
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0); + +} + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10; + struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg; + + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { + dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source); + return; + } + + if (!update_cfg_data(enc10, link_settings, cfg)) + return; + + enc1_configure_encoder(enc10, link_settings); + + dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT); + +} + +void dcn21_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); +} + +void dcn21_link_encoder_disable_output( + struct link_encoder *enc, + enum signal_type signal) +{ + dcn10_link_encoder_disable_output(enc, signal); + + if (dc_is_dp_signal(signal)) + dcn21_link_encoder_release_phy(enc); +} + + +static const struct link_encoder_funcs dcn21_link_enc_funcs = { + .read_state = link_enc2_read_state, + .validate_output_with_stream = + dcn10_link_encoder_validate_output_with_stream, + .hw_init = enc2_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn21_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output, + .disable_output = dcn21_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + .is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy, + .fec_set_enable = enc2_fec_set_enable, + .fec_set_ready = enc2_fec_set_ready, + .fec_is_active = enc2_fec_is_active, + .get_dig_frontend = dcn10_get_dig_frontend, + .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn21_link_encoder_get_max_link_cap, +}; + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + struct dcn10_link_encoder *enc10 = 
&enc21->enc10; + + enc10->base.funcs = &dcn21_link_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc10->base.features.flags.bits. + DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. + * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. + * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc10->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc10->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc10->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc10->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc10->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.DP_IS_USB_C = + bp_cap_info.DP_IS_USB_C; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc10->base.ctx->dc->debug.hdmi20_disable) { + enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h new file mode 100644 index 000000000000..1d7a1a51f13d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -0,0 +1,61 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN21_H__ +#define __DC_LINK_ENCODER__DCN21_H__ + +#include "dcn20/dcn20_link_encoder.h" + +struct dcn21_link_encoder { + struct dcn10_link_encoder enc10; + struct dpcssys_phy_seq_cfg phy_seq_cfg; +}; + +#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \ + SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SR(RDPCSTX0_RDPCSTX_SCRATCH) + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source); + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index de182185fe1f..c865b95d5c0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -28,6 +28,8 @@ #include "dm_services.h" #include "dc.h" +#include "dcn21_init.h" + #include "resource.h" #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" @@ -42,11 +44,11 @@ #include "irq/dcn21/irq_service_dcn21.h" #include "dcn20/dcn20_dpp.h" #include "dcn20/dcn20_optc.h" -#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn20/dcn20_opp.h" #include "dcn20/dcn20_dsc.h" -#include "dcn20/dcn20_link_encoder.h" +#include "dcn21/dcn21_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" @@ -84,16 +86,13 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { - .gpuvm_enable = 0, - .hostvm_enable = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + 
.hostvm_enable = 1, .gpuvm_max_page_table_levels = 1, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 2, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, -#else - .num_dsc = 0, -#endif .rob_buffer_size_kbytes = 168, .det_buffer_size_kbytes = 164, .dpte_buffer_size_in_pte_reqs_luma = 44, @@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 4, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, /*Extra state, no dispclk ramping*/ @@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 5, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, }, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -258,7 +257,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .vmm_page_size_bytes = 4096, .dram_clock_change_latency_us = 23.84, .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3550, + .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 4, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 1, @@ -350,19 +349,29 @@ static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_6) }; -#ifdef CONFIG_DRM_AMD_DC_DMUB -static const struct dcn21_dmcub_registers dmcub_regs = { - DMCUB_REG_LIST_DCN() +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN20_REG_LIST() }; -static const struct dcn21_dmcub_shift dmcub_shift = { - DMCUB_COMMON_MASK_SH_LIST_BASE(__SHIFT) +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) }; -static const struct dcn21_dmcub_mask dmcub_mask = { - DMCUB_COMMON_MASK_SH_LIST_BASE(_MASK) +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN20_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN20(_MASK) }; -#endif #define audio_regs(id)\ [id] = {\ @@ -529,7 +538,6 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ DSC_REG_LIST_DCN20(id)\ @@ -551,7 +559,6 @@ static const struct dcn20_dsc_shift dsc_shift = { static const struct dcn20_dsc_mask dsc_mask = { DSC_REG_LIST_SH_MASK_DCN20(_MASK) }; -#endif #define ipp_regs(id)\ [id] = {\ @@ -628,6 +635,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(4), }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) }; @@ -636,6 +651,11 @@ static const struct dcn10_stream_encoder_mask se_mask = { 
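+	/*
+	 * The *_MASK_SH_LIST macros expand one canonical field list twice --
+	 * once with a __SHIFT suffix and once with _MASK -- via helpers that
+	 * paste designated initializers, roughly (a sketch, not the exact
+	 * definition):
+	 *
+	 *	#define SE_SF(reg_name, field_name, post_fix)\
+	 *		.field_name = reg_name ## __ ## field_name ## post_fix
+	 *
+	 * so se_shift carries each field's bit offset and se_mask the
+	 * matching mask, consumed by the REG_GET/REG_SET helpers.
+	 */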
SE_COMMON_MASK_SH_LIST_DCN20(_MASK) }; +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); + static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -683,7 +703,10 @@ static struct dce_aux *dcn21_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -726,14 +749,13 @@ static const struct resource_caps res_cap_rn = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, - .num_audio = 6, // 6 audio endpoints. 4 audio streams + .num_audio = 4, // 4 audio endpoints. 4 audio streams .num_stream_encoder = 5, .num_pll = 5, // maybe 3 because the last two used for USB-c .num_dwb = 1, .num_ddc = 5, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + .num_vmid = 1, .num_dsc = 3, -#endif }; #ifdef DIAGS_BUILD @@ -758,9 +780,7 @@ static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { .num_pll = 4, .num_dwb = 1, .num_ddc = 4, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 2, -#endif }; #endif @@ -796,15 +816,16 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 5120,/*upto 5K*/ + .max_downscale_src_width = 3840, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, - .disable_48mhz_pwrdwn = true, + .disable_48mhz_pwrdwn = false, + .nv12_iflip_vm_wa = true }; static const struct dc_debug_options debug_defaults_diags = { @@ -827,7 +848,7 @@ enum dcn20_clk_src_array_id { DCN20_CLK_SRC_TOTAL_DCN21 }; -static void destruct(struct dcn21_resource_pool *pool) +static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) { unsigned int i; @@ -838,12 +859,10 @@ static void destruct(struct dcn21_resource_pool *pool) } } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); } -#endif if (pool->base.mpc != NULL) { kfree(TO_DCN20_MPC(pool->base.mpc)); @@ -930,16 +949,11 @@ static void destruct(struct dcn21_resource_pool *pool) if (pool->base.dmcu != NULL) dce_dmcu_destroy(&pool->base.dmcu); -#ifdef CONFIG_DRM_AMD_DC_DMUB - if (pool->base.dmcub != NULL) - dcn21_dmcub_destroy(&pool->base.dmcub); -#endif - if (pool->base.dccg != NULL) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.pp_smu != NULL) - dcn20_pp_smu_destroy(&pool->base.pp_smu); + dcn21_pp_smu_destroy(&pool->base.pp_smu); } @@ -960,20 +974,54 @@ static void calculate_wm_set_for_vlevel( pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, 
pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; -#endif + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; } +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +{ + int i; + + kernel_fpu_begin(); + if (dc->bb_overrides.sr_exit_time_ns) { + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = + dc->bb_overrides.sr_exit_time_ns / 1000.0; + } + } + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + } + + if (dc->bb_overrides.urgent_latency_ns) { + bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if (dc->bb_overrides.dram_clock_change_latency_ns) { + for (i = 0; i < WM_SET_COUNT; i++) { + dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + } + + kernel_fpu_end(); +} + void dcn21_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -988,6 +1036,8 @@ void dcn21_calculate_wm( ASSERT(bw_params); + patch_bounding_box(dc, &context->bw_ctx.dml.soc); + for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -1019,10 +1069,10 @@ void dcn21_calculate_wm( if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); + context, pipes); else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); + pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, + context, pipes); } *out_pipe_cnt = pipe_cnt; @@ -1112,7 +1162,7 @@ static void dcn21_destroy_resource_pool(struct resource_pool **pool) { struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool); - destruct(dcn21_pool); + dcn21_resource_destruct(dcn21_pool); kfree(dcn21_pool); *pool = NULL; } @@ -1251,7 +1301,6 @@ static void read_dce_straps( } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dcn21_dsc_create( struct dc_context *ctx, uint32_t inst) @@ -1267,10 +1316,15 @@ struct display_stream_compressor *dcn21_dsc_create( dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); return &dsc->base; } -#endif static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + /* + TODO: Fix this function to calcualte correct values. + There are known issues with this function currently + that will need to be investigated. Use hardcoded known good values for now. 
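
A note on the patch_bounding_box() helper added above: the override values arrive in nanoseconds, the DML bounding box and watermark table store microseconds, so each non-zero override is divided by 1000.0 in floating point, and that float math is bracketed by kernel_fpu_begin()/kernel_fpu_end(). The sketch below restates the pattern in user space; the struct layout and names are simplified stand-ins, and the FPU calls are stubbed.

```c
#include <stdio.h>

#define WM_SET_COUNT 4

struct wm_entry { double sr_exit_time_us; };
struct bb_overrides { long sr_exit_time_ns; };

static void kernel_fpu_begin(void) { /* no-op outside the kernel */ }
static void kernel_fpu_end(void)   { /* no-op outside the kernel */ }

static void apply_sr_exit_override(struct wm_entry *entries,
                                   const struct bb_overrides *ov)
{
	int i;

	kernel_fpu_begin();
	if (ov->sr_exit_time_ns) {
		for (i = 0; i < WM_SET_COUNT; i++)
			entries[i].sr_exit_time_us =
				ov->sr_exit_time_ns / 1000.0; /* ns -> us */
	}
	kernel_fpu_end();
}

int main(void)
{
	struct wm_entry wm[WM_SET_COUNT] = { {9.0}, {9.0}, {9.0}, {9.0} };
	struct bb_overrides ov = { .sr_exit_time_ns = 12500 };

	apply_sr_exit_override(wm, &ov);
	printf("sr_exit_time_us = %.3f\n", wm[0].sr_exit_time_us); /* 12.500 */
	return 0;
}
```
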
+ + struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; int i; @@ -1278,7 +1332,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_soc.num_chans = bw_params->num_channels; - dcn2_1_soc.num_states = 0; for (i = 0; i < clk_table->num_entries; i++) { @@ -1286,10 +1339,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - /* This is probably wrong, TODO: find correct calculation */ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; - dcn2_1_soc.num_states++; } + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; + dcn2_1_soc.num_states = i; + */ } /* Temporary Place holder until we can get them from fuse */ @@ -1317,32 +1371,42 @@ static struct dpm_clocks dummy_clocks = { }; -enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, +static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { return PP_SMU_RESULT_OK; } -enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, +static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, struct dpm_clocks *clock_table) { *clock_table = dummy_clocks; return PP_SMU_RESULT_OK; } -struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); - pp_smu->ctx.ver = PP_SMU_VER_RN; + if (!pp_smu) + return pp_smu; + + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) { + pp_smu->ctx.ver = PP_SMU_VER_RN; + pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; + pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + } else { - pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; - pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + dm_pp_get_funcs(ctx, pp_smu); + + if (pp_smu->ctx.ver != PP_SMU_VER_RN) + pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); + } return pp_smu; } -void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -1400,6 +1464,7 @@ static struct dce_hwseq *dcn21_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + hws->wa.DEGVIDCN21 = true; } return hws; } @@ -1418,10 +1483,153 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn21_hwseq_create, }; +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN10_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, 
B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E), +}; + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + default: + ASSERT(0); + return 0; + } +} + +static struct link_encoder *dcn21_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn21_link_encoder *enc21 = + kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc21) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn21_link_encoder_construct(enc21, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc21->enc10.base; +} +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* RV1 support max 4 pipes */ + value = value & 0xf; + return value; +} + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes) +{ + uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes); + int i; + struct resource_context *res_ctx = &context->res_ctx; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + pipes[i].pipe.src.hostvm = 1; + pipes[i].pipe.src.gpuvm = 1; + } + + return pipe_cnt; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, - .link_enc_create = dcn20_link_encoder_create, + .link_enc_create = dcn21_link_encoder_create, .validate_bandwidth = dcn21_validate_bandwidth, + .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, @@ -1432,14 +1640,16 @@ static struct resource_funcs dcn21_res_pool_funcs = { .update_bw_bounding_box = update_bw_bounding_box }; -static bool construct( +static bool dcn21_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct dcn21_resource_pool *pool) { - int i; + int i, j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes; ctx->dc_bios->regs = &bios_regs; @@ -1457,7 +1667,9 @@ static bool construct( *************************************************/ 
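
Worth calling out in the resource construction above and below: pipe harvesting. read_pipe_fuses() keeps only the low four bits of CC_DC_PIPE_DIS, and each set bit removes one DPP/OTG from the usable count before dml_init_instance() is called. A stand-alone restatement of that arithmetic, assuming (as the hunks do) one fuse bit per hardware pipe; the function name is illustrative.

```c
#include <stdint.h>
#include <stdio.h>

static unsigned int usable_pipes(uint32_t pipe_fuses, unsigned int max_pipes)
{
	unsigned int i, num_pipes = max_pipes;

	pipe_fuses &= 0xf;              /* RV/RN expose at most 4 pipes */
	for (i = 0; i < max_pipes; i++)
		if (pipe_fuses & (1u << i))
			num_pipes--;    /* this HW instance is fused off */
	return num_pipes;
}

int main(void)
{
	/* fuse value 0b0100: pipe 2 disabled -> 3 usable pipes */
	printf("%u\n", usable_pipes(0x4, 4));
	return 0;
}
```
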
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = 4; + /* max pipe num for ASIC before check pipe fuses */ + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; @@ -1467,6 +1679,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1516,20 +1730,36 @@ static bool construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DMUB - pool->base.dmcub = dcn21_dmcub_create(ctx, - &dmcub_regs, - &dmcub_shift, - &dmcub_mask); - if (pool->base.dmcub == NULL) { - dm_error("DC: failed to create dmcub!\n"); + pool->base.dmcu = dcn21_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); BREAK_TO_DEBUGGER(); goto create_fail; } -#endif pool->base.pp_smu = dcn21_pp_smu_create(ctx); + num_pipes = dcn2_1_ip.max_num_dpp; + + for (i = 0; i < dcn2_1_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + dcn2_1_ip.max_num_dpp = num_pipes; + dcn2_1_ip.max_num_otg = num_pipes; + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); init_data.ctx = dc->ctx; @@ -1537,8 +1767,15 @@ static bool construct( if (!pool->base.irqs) goto create_fail; + j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) + continue; + pool->base.hubps[i] = dcn21_hubp_create(ctx, i); if (pool->base.hubps[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1562,6 +1799,23 @@ static bool construct( "DC: failed to create dpps!\n"); goto create_fail; } + + pool->base.opps[i] = dcn21_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + pool->base.timing_generators[i] = dcn21_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { @@ -1582,27 +1836,9 @@ static bool construct( pool->base.sw_i2cs[i] = NULL; } - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn21_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn21_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - - pool->base.timing_generator_count = i; + pool->base.timing_generator_count = j; + pool->base.pipe_count = j; + pool->base.mpcc_count = j; pool->base.mpc = dcn21_mpc_create(ctx); if (pool->base.mpc == NULL) { @@ -1618,7 +1854,6 @@ static bool 
construct( goto create_fail; } -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT for (i = 0; i < pool->base.res_cap->num_dsc; i++) { pool->base.dscs[i] = dcn21_dsc_create(ctx, i); if (pool->base.dscs[i] == NULL) { @@ -1627,7 +1862,6 @@ static bool construct( goto create_fail; } } -#endif if (!dcn20_dwbc_create(ctx, &pool->base)) { BREAK_TO_DEBUGGER(); @@ -1645,7 +1879,7 @@ static bool construct( &res_create_funcs : &res_create_maximus_funcs))) goto create_fail; - dcn20_hw_sequencer_construct(dc); + dcn21_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; @@ -1658,7 +1892,7 @@ static bool construct( create_fail: - destruct(pool); + dcn21_resource_destruct(pool); return false; } @@ -1673,7 +1907,7 @@ struct resource_pool *dcn21_create_resource_pool( if (!pool) return NULL; - if (construct(init_data->num_virtual_links, dc, pool)) + if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool)) return &pool->base; BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h new file mode 100644 index 000000000000..626d22d437f4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -0,0 +1,49 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
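
The construct()/destruct() pair renamed above to dcn21_resource_construct()/dcn21_resource_destruct() follows DC's usual pool lifecycle: zero-allocate, attempt construction, and on any failure unwind through the destructor so callers only ever see a complete pool or NULL. A minimal sketch of that shape with simplified types, not the driver's own structures.

```c
#include <stdbool.h>
#include <stdlib.h>

struct pool { int n_pipes; };

static bool pool_construct(struct pool *p) { p->n_pipes = 4; return true; }
static void pool_destruct(struct pool *p)  { p->n_pipes = 0; }

static struct pool *pool_create(void)
{
	struct pool *p = calloc(1, sizeof(*p)); /* kzalloc() in the kernel */

	if (!p)
		return NULL;
	if (pool_construct(p))
		return p;

	pool_destruct(p);       /* unwind partial construction */
	free(p);
	return NULL;
}

int main(void)
{
	struct pool *p = pool_create();

	if (p) {
		pool_destruct(p);
		free(p);
	}
	return 0;
}
```
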
+ * + * Authors: AMD + * + */ + +#ifndef DM_CP_PSP_IF__H +#define DM_CP_PSP_IF__H + +struct dc_link; + +struct cp_psp_stream_config { + uint8_t otg_inst; + uint8_t link_enc_inst; + uint8_t stream_enc_inst; + void *dm_stream_ctx; + bool dpms_off; +}; + +struct cp_psp_funcs { + void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config); +}; + +struct cp_psp { + void *handle; + struct cp_psp_funcs funcs; +}; + + +#endif /* DM_CP_PSP_IF__H */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index b6b4333737f2..8bde1d688f2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( /* * Polls for ACT (allocation change trigger) handled and */ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream); /* @@ -118,13 +118,11 @@ bool dm_helpers_submit_i2c( const struct dc_link *link, struct i2c_command *cmd); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, bool enable ); -#endif bool dm_helpers_is_dp_sink_present( struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index c03a441ee638..ae608c329366 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -41,12 +41,8 @@ enum pp_smu_ver { */ PP_SMU_UNSUPPORTED, PP_SMU_VER_RV, -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 PP_SMU_VER_NV, -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) PP_SMU_VER_RN, -#endif PP_SMU_VER_MAX }; @@ -143,7 +139,6 @@ struct pp_smu_funcs_rv { void (*set_pme_wa_enable)(struct pp_smu *pp); }; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 /* Used by pp_smu_funcs_nv.set_voltage_by_freq * */ @@ -247,12 +242,9 @@ struct pp_smu_funcs_nv { enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp, BOOLEAN pstate_handshake_supported); }; -#endif - -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 -#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4 +#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8 #define PP_SMU_NUM_FCLK_DPM_LEVELS 4 #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 @@ -288,18 +280,13 @@ struct pp_smu_funcs_rn { enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp, struct dpm_clocks *clock_table); }; -#endif struct pp_smu_funcs { struct pp_smu ctx; union { struct pp_smu_funcs_rv rv_funcs; -#ifndef CONFIG_TRIM_DRM_AMD_DC_DCN2_0 struct pp_smu_funcs_nv nv_funcs; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct pp_smu_funcs_rn rn_funcs; -#endif }; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 1a0429744630..968ff1fef486 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -40,6 +40,9 @@ #undef DEPRECATED +struct dmub_srv; +struct dc_dmub_srv; + irq_handler_idx dm_register_interrupt( struct dc_context *ctx, struct dc_interrupt_params *int_params, @@ -139,6 +142,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub); +void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv); + +void 
reg_sequence_start_gather(const struct dc_context *ctx); +void reg_sequence_start_execute(const struct dc_context *ctx); +void reg_sequence_wait_done(const struct dc_context *ctx); + #define FD(reg_field) reg_field ## __SHIFT, \ reg_field ## _MASK diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 5b2a65b42403..fb6358036be8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -24,28 +24,31 @@ # It provides the general basic services required by other DAL # subcomponents. -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +dml_ccflags := -mhard-float -msse -dml_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +dml_ccflags += -mpreferred-stack-boundary=4 +else dml_ccflags += -msse2 endif CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) endif @@ -56,11 +59,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ dml_common_defs.o -ifdef CONFIG_DRM_AMD_DC_DCN2_0 +ifdef CONFIG_DRM_AMD_DC_DCN DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 649883777f62..3b224b155e8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -2577,7 +2577,9 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + if (mode_lib->vba.DRAMClockChangeSupportsVActive && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { + mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 0fafd693ffb4..6482d7b99bae 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_DSC_IMAGE_WIDTH 5184 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -2610,7 +2611,12 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + if (mode_lib->vba.DRAMClockChangeSupportsVActive && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { + mode_lib->vba.DRAMClockChangeWatermark += 25; + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; + } else if (mode_lib->vba.DummyPStateCheck && + mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { @@ -3901,6 +3907,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3925,7 +3935,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 878bf4782ce6..9df24ececcec 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 
2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal; dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start; - disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start - + min_dst_y_ttu_vblank) * dml_pow(2, 2)); + disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) dlg_vblank_start * dml_pow(2, 2)); ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18)); dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n", @@ -959,7 +958,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1654,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index ed8bf5f723c9..1e6aeb1bd2bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 
2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 3b6ed60dcd35..945291d5ad98 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "../display_mode_lib.h" #include "../dml_inline_defs.h" @@ -65,6 +64,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN21_MAX_DSC_IMAGE_WIDTH 5184 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3379,6 +3379,8 @@ static unsigned int TruncToValidBPP( return 30; else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; + else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) + return 18; else return BPP_INVALID; } @@ -3936,6 +3938,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3965,7 +3971,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; 
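
The dml21 hunk above tightens when ODM combine is forced: a plane may stay on a single pipe only if its required DISPCLK fits under the rounded-down maximum for the top voltage state and, when DSC is enabled, the active width stays at or below DCN21_MAX_DSC_IMAGE_WIDTH (5184). A hedged restatement of that predicate; the parameter names are simplified from the vba locals.

```c
#include <stdbool.h>
#include <stdio.h>

#define DCN21_MAX_DSC_IMAGE_WIDTH 5184

static bool needs_odm_combine(bool odm_capable,
                              double required_dispclk_mhz,
                              double max_dispclk_mhz,
                              bool dsc_enabled,
                              unsigned int h_active)
{
	if (!odm_capable)
		return false;   /* combining is not an option at all */

	if (required_dispclk_mhz <= max_dispclk_mhz &&
	    (!dsc_enabled || h_active <= DCN21_MAX_DSC_IMAGE_WIDTH))
		return false;   /* a single pipe suffices */

	return true;            /* split the plane across two pipes */
}

int main(void)
{
	/* 5760 pixels wide with DSC exceeds the 5184 single-pipe limit */
	printf("%d\n", needs_odm_combine(true, 800.0, 1200.0, true, 5760));
	return 0;
}
```
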
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { @@ -6117,4 +6125,3 @@ static double CalculateExtraLatency( return CalculateExtraLatency; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index a1f207cbb966..e60af383b4db 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "../display_mode_lib.h" #include "../display_mode_vba.h" @@ -1523,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params( disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; - disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; - disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;; + disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; + disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // Clamp to max for now if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23)) @@ -1820,4 +1819,3 @@ static void calculate_ttu_cursor( } } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 1c97083b8d0b..55d4cb23a073 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -135,9 +135,7 @@ enum dm_validation_status { DML_FAIL_DIO_SUPPORT, DML_FAIL_NOT_ENOUGH_DSC, DML_FAIL_DSC_CLK_REQUIRED, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DML_FAIL_DSC_VALIDATION_FAILURE, -#endif DML_FAIL_URGENT_LATENCY, DML_FAIL_REORDERING_BUFFER, DML_FAIL_DISPCLK_DPPCLK, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 704efefdcba8..2689401a03a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -25,18 +25,13 @@ #include "display_mode_lib.h" #include "dc_features.h" -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/display_mode_vba_20.h" #include "dcn20/display_rq_dlg_calc_20.h" #include "dcn20/display_mode_vba_20v2.h" #include "dcn20/display_rq_dlg_calc_20v2.h" -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 #include "dcn21/display_mode_vba_21.h" #include "dcn21/display_rq_dlg_calc_21.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) const struct dml_funcs dml20_funcs = { .validate = dml20_ModeSupportAndSystemConfigurationFull, .recalculate = dml20_recalculate, @@ -50,16 +45,13 @@ const struct dml_funcs dml20v2_funcs = { .rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg, .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg }; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 const struct dml_funcs dml21_funcs = { .validate = dml21_ModeSupportAndSystemConfigurationFull, .recalculate = dml21_recalculate, .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg, 
.rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg }; -#endif void dml_init_instance(struct display_mode_lib *lib, const struct _vcs_dpi_soc_bounding_box_st *soc_bb, @@ -70,19 +62,15 @@ void dml_init_instance(struct display_mode_lib *lib, lib->ip = *ip_params; lib->project = project; switch (project) { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 case DML_PROJECT_NAVI10: lib->funcs = dml20_funcs; break; case DML_PROJECT_NAVI10v2: lib->funcs = dml20v2_funcs; break; -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 case DML_PROJECT_DCN21: lib->funcs = dml21_funcs; break; -#endif default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index d8c59aa356b6..cf2758ca5b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -27,20 +27,14 @@ #include "dml_common_defs.h" -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "display_mode_vba.h" -#endif enum dml_project { DML_PROJECT_UNDEFINED, DML_PROJECT_RAVEN1, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 DML_PROJECT_NAVI10, DML_PROJECT_NAVI10v2, -#endif -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 DML_PROJECT_DCN21, -#endif }; struct display_mode_lib; @@ -70,9 +64,7 @@ struct display_mode_lib { struct _vcs_dpi_ip_params_st ip; struct _vcs_dpi_soc_bounding_box_st soc; enum dml_project project; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct vba_vars_st vba; -#endif struct dal_logger *logger; struct dml_funcs funcs; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index f4c1ef9046bf..220d5e610f1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -99,6 +99,7 @@ struct _vcs_dpi_soc_bounding_box_st { unsigned int num_chans; unsigned int vmm_page_size_bytes; unsigned int hostvm_min_page_size_bytes; + unsigned int gpuvm_min_page_size_bytes; double dram_clock_change_latency_us; double dummy_pstate_latency_us; double writeback_dram_clock_change_latency_us; @@ -112,6 +113,7 @@ struct _vcs_dpi_soc_bounding_box_st { bool do_urgent_latency_adjustment; double urgent_latency_adjustment_fabric_clock_component_us; double urgent_latency_adjustment_fabric_clock_reference_mhz; + bool disable_dram_clock_change_vactive_support; }; struct _vcs_dpi_ip_params_st { @@ -145,7 +147,6 @@ struct _vcs_dpi_ip_params_st { unsigned int writeback_interface_buffer_size_kbytes; unsigned int writeback_line_buffer_buffer_size; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 unsigned int writeback_10bpc420_supported; double writeback_max_hscl_ratio; double writeback_max_vscl_ratio; @@ -155,7 +156,6 @@ struct _vcs_dpi_ip_params_st { unsigned int writeback_max_vscl_taps; unsigned int writeback_line_buffer_luma_buffer_size; unsigned int writeback_line_buffer_chroma_buffer_size; -#endif unsigned int max_page_table_levels; unsigned int max_num_dpp; @@ -225,6 +225,7 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_scan; int sw_mode; int macro_tile_size; + unsigned int surface_height_y; unsigned int viewport_width; unsigned int viewport_height; unsigned int viewport_y_y; @@ -269,7 +270,7 @@ struct writeback_st { struct _vcs_dpi_display_output_params_st { int dp_lanes; - int output_bpp; + double output_bpp; int dsc_enable; int wb_enable; int num_active_wb; @@ -318,6 +319,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_width; unsigned int vready_offset; unsigned char interlaced; + unsigned char embedded; double 
pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; @@ -400,6 +402,7 @@ struct _vcs_dpi_display_rq_misc_params_st { struct _vcs_dpi_display_rq_params_st { unsigned char yuv420; unsigned char yuv420_10bpc; + unsigned char rgbe_alpha; display_rq_misc_params_st misc; display_rq_sizing_params_st sizing; display_rq_dlg_params_st dlg; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 65cf4edddaff..15b72a8b5174 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #include "display_mode_lib.h" #include "display_mode_vba.h" @@ -222,13 +221,17 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib) mode_lib->vba.SRExitTime = soc->sr_exit_time_us; mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us; mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us; + mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us; + mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support || + mode_lib->vba.DummyPStateCheck; + mode_lib->vba.Downspreading = soc->downspread_percent; mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new! mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new! mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes; - mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024; + mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024; mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 1024; // Set the voltage scaling clocks as the defaults. Most of these will // be set to different values by the test @@ -375,6 +378,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; + mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = (enum scan_direction_class) (src->source_scan); @@ -432,8 +436,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode? 
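
The fetch_socbb_params() change above derives two related flags: DummyPStateCheck is true when the bounding box has been patched so the normal DRAM clock change latency equals the dummy p-state latency, and vactive DRAM clock changes are then permitted even when the SoC table would otherwise disable them. Restated as a small helper; the field names follow the soc struct above, the rest is illustrative.

```c
#include <stdbool.h>
#include <stdio.h>

struct soc_bb {
	double dram_clock_change_latency_us;
	double dummy_pstate_latency_us;
	bool disable_dram_clock_change_vactive_support;
};

static bool supports_vactive_dram_clock_change(const struct soc_bb *soc)
{
	/* the driver compares the doubles directly, as here */
	bool dummy_pstate_check =
		soc->dram_clock_change_latency_us ==
		soc->dummy_pstate_latency_us;

	return !soc->disable_dram_clock_change_vactive_support ||
	       dummy_pstate_check;
}

int main(void)
{
	struct soc_bb soc = {
		.dram_clock_change_latency_us = 23.84,
		.dummy_pstate_latency_us = 23.84,
		.disable_dram_clock_change_vactive_support = true,
	};

	/* latencies match, so vactive support is granted despite the flag */
	printf("%d\n", supports_vactive_dram_clock_change(&soc));
	return 0;
}
```
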
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine; + mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = + dst->odm_combine; mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] = (enum output_format_class) (dout->output_format); + mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = + dout->output_bpp; mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] = (enum output_encoder_class) (dout->output_type); @@ -853,4 +861,3 @@ double CalculateWriteBackDISPCLK( return CalculateWriteBackDISPCLK; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 91decac50557..3eb657ed5714 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -23,7 +23,6 @@ * */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #ifndef __DML2_DISPLAY_MODE_VBA_H__ #define __DML2_DISPLAY_MODE_VBA_H__ @@ -155,6 +154,8 @@ struct vba_vars_st { double UrgentLatencySupportUsChroma; unsigned int DSCFormatFactor; + bool DummyPStateCheck; + bool DRAMClockChangeSupportsVActive; bool PrefetchModeSupported; enum self_refresh_affinity AllowDRAMSelfRefreshOrDRAMClockChangeInVblank; // Mode Support only double XFCRemoteSurfaceFlipDelay; @@ -387,6 +388,7 @@ struct vba_vars_st { /* vba mode support */ /*inputs*/ + bool EmbeddedPanel[DC__NUM_DPP__MAX]; bool SupportGFX7CompatibleTilingIn32bppAnd64bpp; double MaxHSCLRatio; double MaxVSCLRatio; @@ -869,4 +871,3 @@ double CalculateWriteBackDISPCLK( unsigned int WritebackChromaLineBufferWidth); #endif /* _DML2_DISPLAY_MODE_VBA_H_ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index ad8571f5a142..4c3e9cc30167 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -243,7 +243,7 @@ void dml1_extract_rq_regs( rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - /* FIXME: take the max between luma, chroma chunk size? + /* TODO: take the max between luma, chroma chunk size? * okay for now, as we are setting chunk_bytes to 8kb anyways */ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ @@ -602,7 +602,7 @@ static void get_surf_rq_param( unsigned int log2_dpte_group_length; unsigned int func_meta_row_height, func_dpte_row_height; - /* FIXME check if ppe apply for both luma and chroma in 422 case */ + /* TODO check if ppe apply for both luma and chroma in 422 case */ if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params( ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); disp_dlg_regs->dlg_vblank_end = interlaced ? 
(vblank_end / 2) : vblank_end; /* 15 bits */ - prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */ + prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz; min_ttu_vblank = dlg_sys_param.t_urg_wm_us; @@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params( dcc_en = e2e_pipe_param.pipe.src.dcc; dual_plane = is_dual_plane( (enum source_format_class) e2e_pipe_param.pipe.src.source_format); - mode_422 = 0; /* FIXME */ + mode_422 = 0; /* TODO */ access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ bytes_per_element_l = get_bytes_per_element( (enum source_format_class) e2e_pipe_param.pipe.src.source_format, @@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params( cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1) * (double) cur0_req_width; cur0_req_per_width = cur0_width_ub / (double) cur0_req_width; - hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */ + hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */ if (vratio_pre_l <= 1.0) { refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index b456cd23c6fa..641ffb7cfaed 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,15 +1,21 @@ +# SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +dsc_ccflags := -mhard-float -msse -dsc_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). 
+dsc_ccflags += -mpreferred-stack-boundary=4 +else dsc_ccflags += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 5995bcdfed54..d2423ad1fac2 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -22,30 +22,65 @@ * Author: AMD */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dc.h" -#include "core_types.h" +#include "dc_hw_types.h" #include "dsc.h" #include <drm/drm_dp_helper.h> +#include "dc.h" -struct dc_dsc_policy { - bool use_min_slices_h; - int max_slices_h; // Maximum available if 0 - int min_sice_height; // Must not be less than 8 - int max_target_bpp; - int min_target_bpp; // Minimum target bits per pixel -}; +/* This module's internal functions */ -const struct dc_dsc_policy dsc_policy = { - .use_min_slices_h = true, // DSC Policy: Use minimum number of slices that fits the pixel clock - .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 4 for or 8, depending on the mode) - .min_sice_height = 108, // DSC Policy: Use slice height recommended by VESA DSC Spreadsheet user guide - .max_target_bpp = 16, - .min_target_bpp = 8, -}; +static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing) +{ + uint32_t bits_per_channel = 0; + uint32_t kbps; + if (timing->flags.DSC) { + kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); + kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); + return kbps; + } -/* This module's internal functions */ + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: + bits_per_channel = 12; + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; + } + + ASSERT(bits_per_channel != 0); + + kbps = timing->pix_clk_100hz / 10; + kbps *= bits_per_channel; + + if (timing->flags.Y_ONLY != 1) { + /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ + kbps *= 3; + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + kbps /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + kbps = kbps * 2 / 3; + } + + return kbps; + +} static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -178,16 +213,18 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp } static void get_dsc_enc_caps( - const struct dc *dc, + const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { // This is a static HW query, so we can use any DSC - struct display_stream_compressor *dsc = dc->res_pool->dscs[0]; memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) + if (dsc) { dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if (dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + } } /* Returns 'false' if no intersection was found for at least one capablity. 
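
The dc_dsc_bandwidth_in_kbps_from_timing() helper added above converts a timing into a bandwidth figure: the pixel clock is carried in 100Hz units, so dividing by 10 yields kHz, which is then scaled by bits per channel and by 3 components for RGB/4:4:4 (halved for 4:2:0, multiplied by 2/3 for 4:2:2; for DSC the bits_per_pixel value is in 1/16 bpp units, hence the divide by 160). A worked sketch of the non-DSC RGB case only:

```c
#include <stdint.h>
#include <stdio.h>

/* pix_clk_100hz is the pixel clock in 100Hz units, as in dc_crtc_timing */
static uint32_t bw_kbps_rgb(uint32_t pix_clk_100hz, uint32_t bits_per_channel)
{
	uint32_t kbps = pix_clk_100hz / 10; /* 100Hz units -> kHz */

	kbps *= bits_per_channel;
	kbps *= 3;                          /* three components for RGB/4:4:4 */
	return kbps;
}

int main(void)
{
	/* 4K60 (~594MHz) at 8bpc: 594000 * 8 * 3 = 14256000 kbps */
	printf("%u kbps\n", bw_kbps_rgb(5940000, 8));
	return 0;
}
```
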
@@ -290,7 +327,7 @@ static void get_dsc_bandwidth_range( struct dc_dsc_bw_range *range) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing); /* max dsc target bpp */ range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz); @@ -512,6 +549,7 @@ static bool setup_dsc_config( const struct dsc_enc_caps *dsc_enc_caps, int target_bandwidth_kbps, const struct dc_crtc_timing *timing, + int min_slice_height_override, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -526,9 +564,11 @@ static bool setup_dsc_config( bool is_dsc_possible = false; int pic_height; int slice_height; + struct dc_dsc_policy policy; memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); + dc_dsc_get_policy_for_timing(timing, &policy); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -544,7 +584,12 @@ static bool setup_dsc_config( goto done; if (target_bandwidth_kbps > 0) { - is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_policy, &dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp); + is_dsc_possible = decide_dsc_target_bpp_x16( + &policy, + &dsc_common_caps, + target_bandwidth_kbps, + timing, + &target_bpp); dsc_cfg->bits_per_pixel = target_bpp; } if (!is_dsc_possible) @@ -646,20 +691,20 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - if (dsc_policy.use_min_slices_h) { + if (policy.use_min_slices_h) { if (min_slices_h > 0) num_slices_h = min_slices_h; else if (max_slices_h > 0) { // Fall back to max slices if min slices is not working out - if (dsc_policy.max_slices_h) - num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); + if (policy.max_slices_h) + num_slices_h = min(policy.max_slices_h, max_slices_h); else num_slices_h = max_slices_h; } else is_dsc_possible = false; } else { if (max_slices_h > 0) { - if (dsc_policy.max_slices_h) - num_slices_h = min(dsc_policy.max_slices_h, max_slices_h); + if (policy.max_slices_h) + num_slices_h = min(policy.max_slices_h, max_slices_h); else num_slices_h = max_slices_h; } else if (min_slices_h > 0) // Fall back to min slices if max slices is not possible @@ -680,7 +725,10 @@ static bool setup_dsc_config( // Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by. // For 4:2:0 make sure the slice height is divisible by 2 as well. - slice_height = min(dsc_policy.min_sice_height, pic_height); + if (min_slice_height_override == 0) + slice_height = min(policy.min_slice_height, pic_height); + else + slice_height = min(min_slice_height_override, pic_height); while (slice_height < pic_height && (pic_height % slice_height != 0 || (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0))) @@ -802,7 +850,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp * If DSC is not possible, leave '*range' untouched. 
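
Within setup_dsc_config() above, the slice height now starts from either the policy minimum (108, per the VESA DSC spreadsheet guidance) or the new min_slice_height_override, then walks upward until the picture height divides evenly, keeping the height even for 4:2:0. A self-contained sketch of just that search; the driver additionally validates the result against encoder caps.

```c
#include <stdbool.h>
#include <stdio.h>

static int pick_slice_height(int pic_height, int min_slice_height, bool is_420)
{
	int h = min_slice_height < pic_height ? min_slice_height : pic_height;

	/* grow until the height divides evenly (and stays even for 4:2:0) */
	while (h < pic_height &&
	       (pic_height % h != 0 || (is_420 && h % 2 != 0)))
		h++;
	return h;
}

int main(void)
{
	/* 2160 % 108 == 0, so the policy default is accepted as-is */
	printf("%d\n", pick_slice_height(2160, 108, false));
	return 0;
}
```
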
*/ bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_bpp, const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -814,16 +863,14 @@ bool dc_dsc_compute_bandwidth_range( struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) - is_dsc_possible = setup_dsc_config(dsc_sink_caps, - &dsc_enc_caps, - 0, - timing, &config); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, + dsc_min_slice_height_override, &config); if (is_dsc_possible) get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range); @@ -832,8 +879,9 @@ bool dc_dsc_compute_bandwidth_range( } bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg) @@ -841,11 +889,69 @@ bool dc_dsc_compute_config( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, dsc_cfg); + timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } -#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ + +void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc_dsc_policy *policy) +{ + uint32_t bpc = 0; + + policy->min_target_bpp = 0; + policy->max_target_bpp = 0; + + /* DSC Policy: Use minimum number of slices that fits the pixel clock */ + policy->use_min_slices_h = true; + + /* DSC Policy: Use max available slices + * (in our case 4 for or 8, depending on the mode) + */ + policy->max_slices_h = 0; + + /* DSC Policy: Use slice height recommended + * by VESA DSC Spreadsheet user guide + */ + policy->min_slice_height = 108; + + /* DSC Policy: follow DP specs with an internal upper limit to 16 bpp + * for better interoperability + */ + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + bpc = 8; + break; + case COLOR_DEPTH_101010: + bpc = 10; + break; + case COLOR_DEPTH_121212: + bpc = 12; + break; + default: + return; + } + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + case PIXEL_ENCODING_YCBCR422: /* assume no YCbCr422 native support */ + /* DP specs limits to 8 */ + policy->min_target_bpp = 8; + /* DP specs limits to 3 x bpc */ + policy->max_target_bpp = 3 * bpc; + break; + case PIXEL_ENCODING_YCBCR420: + /* DP specs limits to 6 */ + policy->min_target_bpp = 6; + /* DP specs limits to 1.5 x bpc assume bpc is an even number */ + policy->max_target_bpp = bpc * 3 / 2; + break; + default: + return; + } + /* internal upper limit to 16 bpp */ + if (policy->max_target_bpp > 16) + policy->max_target_bpp = 16; +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h index 020ad8f685ea..9f70e87b3ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h @@ -1,4 
+1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -51,4 +50,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h index f66d006eac5d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -703,4 +702,3 @@ const qp_table qp_table_422_8bpc_max = { { 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} } }; -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index ca51e83f8764..03ae15946c6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -177,7 +176,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com { float bpp_group; float initial_xmit_delay_factor; - int source_bpp; int padding_pixels; int i; @@ -217,8 +215,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->initial_xmit_delay++; } - source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5); - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); rc->flatness_det_thresh = 2 << (bpc - 8); @@ -255,4 +251,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->rc_buf_thresh[13] = 8064; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index f1d6e793bc61..b6b1f09c2009 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* * Copyright 2017 Advanced Micro Devices, Inc. @@ -82,4 +81,3 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 73172fd0b529..1f6e63b71456 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -1,4 +1,3 @@ -#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /* * Copyright 2012-17 Advanced Micro Devices, Inc. 
* @@ -144,4 +143,3 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par return ret; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index b3062275711e..202baa210cc8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -61,26 +61,25 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120) ############################################################################### # DCN 1x ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10) -endif ############################################################################### # DCN 2 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0 GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20) -endif -ifdef CONFIG_DRM_AMD_DC_DCN2_1 +############################################################################### +# DCN 21 +############################################################################### GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index 43a440385b43..83f798cb8b21 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dm_services.h" #include "include/gpio_types.h" #include "../hw_factory.h" @@ -110,6 +109,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(4), ddc_data_regs_dcn2(5), ddc_data_regs_dcn2(6), + { + DDC_GPIO_VGA_REG_LIST(DATA), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } }; static const struct ddc_registers ddc_clk_regs_dcn[] = { @@ -119,6 +124,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(4), ddc_clk_regs_dcn2(5), ddc_clk_regs_dcn2(6), + { + DDC_GPIO_VGA_REG_LIST(CLK), + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + } }; static const struct ddc_sh_mask ddc_shift[] = { @@ -246,4 +257,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory) factory->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h index 43a4ce7aa3bf..0fd9b315bd7a 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifndef __DAL_HW_FACTORY_DCN20_H__ #define __DAL_HW_FACTORY_DCN20_H__ @@ -30,4 +29,3 @@ void dal_hw_factory_dcn20_init(struct hw_factory *factory); #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c index 915e896e0e91..52ba62b3b5e4 100644 --- 
a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c @@ -26,7 +26,6 @@ /* * Pre-requisites: headers required by header of this unit */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "hw_translate_dcn20.h" #include "dm_services.h" @@ -379,4 +378,3 @@ void dal_hw_translate_dcn20_init(struct hw_translate *tr) tr->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h index 01f52c7bed86..5f7a35530e26 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifndef __DAL_HW_TRANSLATE_DCN20_H__ #define __DAL_HW_TRANSLATE_DCN20_H__ @@ -32,4 +31,3 @@ struct hw_translate; void dal_hw_translate_dcn20_init(struct hw_translate *tr); #endif /* __DAL_HW_TRANSLATE_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c index 8572678f8d4f..907c5911eb9e 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dm_services.h" #include "include/gpio_types.h" #include "../hw_factory.h" @@ -239,4 +238,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory) factory->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h index 2443f9e7afbf..4949e0c7fa06 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #ifndef __DAL_HW_FACTORY_DCN21_H__ #define __DAL_HW_FACTORY_DCN21_H__ @@ -30,4 +29,3 @@ void dal_hw_factory_dcn21_init(struct hw_factory *factory); #endif /* __DAL_HW_FACTORY_DCN20_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index fbb58fb8c318..291966efe63d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -26,7 +26,6 @@ /* * Pre-requisites: headers required by header of this unit */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "hw_translate_dcn21.h" #include "dm_services.h" @@ -382,4 +381,3 @@ void dal_hw_translate_dcn21_init(struct hw_translate *tr) tr->funcs = &funcs; } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h index 2bfaac24c574..9462b0a65200 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h @@ -22,7 +22,6 @@ * Authors: AMD * */ -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #ifndef __DAL_HW_TRANSLATE_DCN21_H__ #define __DAL_HW_TRANSLATE_DCN21_H__ @@ -32,4 +31,3 @@ struct hw_translate; void dal_hw_translate_dcn21_init(struct hw_translate *tr); #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */ -#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h index 
f91e85b04956..308a543178a5 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h @@ -48,13 +48,11 @@ DDC_GPIO_REG_LIST(cd,id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_REG_LIST_DCN2(cd, id) \ DDC_GPIO_REG_LIST(cd, id),\ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP),\ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif #define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\ @@ -90,13 +88,11 @@ DDC_GPIO_I2C_REG_LIST(cd),\ .ddc_setup = 0 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_I2C_REG_LIST_DCN2(cd) \ DDC_GPIO_I2C_REG_LIST(cd),\ .ddc_setup = 0,\ .phy_aux_cntl = REG(PHY_AUX_CNTL), \ .dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5) -#endif #define DDC_MASK_SH_LIST_COMMON(mask_sh) \ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\ @@ -110,22 +106,18 @@ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh) -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DDC_MASK_SH_LIST_DCN2(mask_sh, cd) \ {DDC_MASK_SH_LIST_COMMON(mask_sh),\ 0,\ 0,\ (PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\ (DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)} -#endif struct ddc_registers { struct gpio_registers gpio; uint32_t ddc_setup; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t phy_aux_cntl; uint32_t dc_gpio_aux_ctrl_5; -#endif }; struct ddc_sh_mask { @@ -140,11 +132,9 @@ struct ddc_sh_mask { /* i2cpad_mask */ uint32_t DC_GPIO_SDA_PD_DIS; uint32_t DC_GPIO_SCL_PD_DIS; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) //phy_aux_cntl uint32_t AUX_PAD_RXSEL; uint32_t DDC_PAD_I2CMODE; -#endif }; @@ -180,7 +170,6 @@ struct ddc_sh_mask { {\ DDC_I2C_REG_LIST(SCL)\ } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define ddc_data_regs_dcn2(id) \ {\ DDC_REG_LIST_DCN2(DATA, id)\ @@ -200,7 +189,6 @@ struct ddc_sh_mask { {\ DDC_REG_LIST_DCN2(SCL)\ } -#endif #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c index f8f85490e77e..f67c18375bfd 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c @@ -321,8 +321,6 @@ void dal_gpio_destroy( return; } - dal_gpio_close(*gpio); - switch ((*gpio)->id) { case GPIO_ID_DDC_DATA: kfree((*gpio)->hw_container.ddc); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index d03165e71dc6..92280cc05e2d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux( return; } - dal_gpio_close(*mux); dal_gpio_destroy(mux); kfree(*mux); @@ -460,7 +459,6 @@ void dal_gpio_destroy_irq( return; } - dal_gpio_close(*irq); dal_gpio_destroy(irq); kfree(*irq); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 1c12961f6472..1ae153eab31d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -48,18 +48,18 @@ struct gpio; -static void destruct( +static void dal_hw_ddc_destruct( struct hw_ddc *pin) { dal_hw_gpio_destruct(&pin->base); } -static void destroy( +static void dal_hw_ddc_destroy( struct hw_gpio_pin **ptr) { struct hw_ddc 
*pin = HW_DDC_FROM_BASE(*ptr); - destruct(pin); + dal_hw_ddc_destruct(pin); kfree(pin); @@ -150,7 +150,6 @@ static enum gpio_result set_config( AUX_PAD1_MODE, 0); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) { REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1); } @@ -158,7 +157,6 @@ static enum gpio_result set_config( if (ddc->regs->phy_aux_cntl != 0) { REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1); } -#endif return GPIO_RESULT_OK; case GPIO_DDC_CONFIG_TYPE_MODE_AUX: /* set the AUX pad mode */ @@ -166,12 +164,10 @@ static enum gpio_result set_config( REG_SET(gpio.MASK_reg, regval, AUX_PAD1_MODE, 1); } -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) { REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 0); } -#endif return GPIO_RESULT_OK; case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT: @@ -211,7 +207,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_ddc_destroy, .open = dal_hw_gpio_open, .get_value = dal_hw_gpio_get_value, .set_value = dal_hw_gpio_set_value, @@ -220,7 +216,7 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( +static void dal_hw_ddc_construct( struct hw_ddc *ddc, enum gpio_id id, uint32_t en, @@ -247,7 +243,7 @@ void dal_hw_ddc_init( return; } - construct(*hw_ddc, id, en, ctx); + dal_hw_ddc_construct(*hw_ddc, id, en, ctx); } struct hw_gpio_pin *dal_hw_ddc_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index fa9f1d055ec8..d2d36d48caaa 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -45,15 +45,11 @@ #include "dce80/hw_factory_dce80.h" #include "dce110/hw_factory_dce110.h" #include "dce120/hw_factory_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_factory_dcn10.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/hw_factory_dcn20.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_factory_dcn21.h" -#endif #include "diagnostics/hw_factory_diag.h" @@ -90,19 +86,15 @@ bool dal_hw_factory_init( case DCE_VERSION_12_1: dal_hw_factory_dce120_init(factory); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: dal_hw_factory_dcn10_init(factory); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: dal_hw_factory_dcn20_init(factory); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_factory_dcn21_init(factory); return true; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c index 69b899741f6d..f9e847e6555d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c @@ -46,22 +46,13 @@ struct gpio; -static void dal_hw_generic_construct( - struct hw_generic *pin, - enum gpio_id id, - uint32_t en, - struct dc_context *ctx) -{ - dal_hw_gpio_construct(&pin->base, id, en, ctx); -} - static void dal_hw_generic_destruct( struct hw_generic *pin) { dal_hw_gpio_destruct(&pin->base); } -static void destroy( +static void dal_hw_generic_destroy( struct hw_gpio_pin **ptr) { struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr); @@ -90,7 +81,7 @@ static enum gpio_result set_config( } static const struct 
hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_generic_destroy, .open = dal_hw_gpio_open, .get_value = dal_hw_gpio_get_value, .set_value = dal_hw_gpio_set_value, @@ -99,14 +90,14 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( - struct hw_generic *generic, +static void dal_hw_generic_construct( + struct hw_generic *pin, enum gpio_id id, uint32_t en, struct dc_context *ctx) { - dal_hw_generic_construct(generic, id, en, ctx); - generic->base.base.funcs = &funcs; + dal_hw_gpio_construct(&pin->base, id, en, ctx); + pin->base.base.funcs = &funcs; } void dal_hw_generic_init( @@ -126,7 +117,7 @@ void dal_hw_generic_init( return; } - construct(*hw_generic, id, en, ctx); + dal_hw_generic_construct(*hw_generic, id, en, ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c index 00c9bcf660a3..692f29de7797 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c @@ -46,34 +46,18 @@ struct gpio; -static void dal_hw_hpd_construct( - struct hw_hpd *pin, - enum gpio_id id, - uint32_t en, - struct dc_context *ctx) -{ - dal_hw_gpio_construct(&pin->base, id, en, ctx); -} - static void dal_hw_hpd_destruct( struct hw_hpd *pin) { dal_hw_gpio_destruct(&pin->base); } - -static void destruct( - struct hw_hpd *hpd) -{ - dal_hw_hpd_destruct(hpd); -} - -static void destroy( +static void dal_hw_hpd_destroy( struct hw_gpio_pin **ptr) { struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr); - destruct(hpd); + dal_hw_hpd_destruct(hpd); kfree(hpd); @@ -120,7 +104,7 @@ static enum gpio_result set_config( } static const struct hw_gpio_pin_funcs funcs = { - .destroy = destroy, + .destroy = dal_hw_hpd_destroy, .open = dal_hw_gpio_open, .get_value = get_value, .set_value = dal_hw_gpio_set_value, @@ -129,14 +113,14 @@ static const struct hw_gpio_pin_funcs funcs = { .close = dal_hw_gpio_close, }; -static void construct( - struct hw_hpd *hpd, +static void dal_hw_hpd_construct( + struct hw_hpd *pin, enum gpio_id id, uint32_t en, struct dc_context *ctx) { - dal_hw_hpd_construct(hpd, id, en, ctx); - hpd->base.base.funcs = &funcs; + dal_hw_gpio_construct(&pin->base, id, en, ctx); + pin->base.base.funcs = &funcs; } void dal_hw_hpd_init( @@ -156,7 +140,7 @@ void dal_hw_hpd_init( return; } - construct(*hw_hpd, id, en, ctx); + dal_hw_hpd_construct(*hw_hpd, id, en, ctx); } struct hw_gpio_pin *dal_hw_hpd_get_pin(struct gpio *gpio) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index f2046f55d6a8..5d396657a1ee 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -43,15 +43,11 @@ #include "dce80/hw_translate_dce80.h" #include "dce110/hw_translate_dce110.h" #include "dce120/hw_translate_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/hw_translate_dcn10.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/hw_translate_dcn20.h" -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #include "dcn21/hw_translate_dcn21.h" -#endif #include "diagnostics/hw_translate_diag.h" @@ -85,19 +81,15 @@ bool dal_hw_translate_init( case DCE_VERSION_12_1: dal_hw_translate_dce120_init(translate); return true; -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) case DCN_VERSION_1_0: case DCN_VERSION_1_01: dal_hw_translate_dcn10_init(translate); return true; -#endif 
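The hw_factory.c and hw_translate.c hunks above and below collapse the per-generation guards (CONFIG_DRM_AMD_DC_DCN1_0, CONFIG_DRM_AMD_DC_DCN2_0, CONFIG_DRM_AMD_DC_DCN2_1) into the single CONFIG_DRM_AMD_DC_DCN option, so one #if block now spans every DCN case label. A minimal sketch of the resulting dispatch shape; the function name here is illustrative, while the case labels and init calls follow the hunks:

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
#endif

/* illustrative only: all DCN generations dispatched under one Kconfig guard */
static bool hw_translate_init_sketch(enum dce_version version,
		struct hw_translate *translate)
{
	switch (version) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		dal_hw_translate_dcn10_init(translate);
		return true;
	case DCN_VERSION_2_0:
		dal_hw_translate_dcn20_init(translate);
		return true;
	case DCN_VERSION_2_1:
		dal_hw_translate_dcn21_init(translate);
		return true;
#endif
	default:
		return false;
	}
}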
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0) case DCN_VERSION_2_0: dal_hw_translate_dcn20_init(translate); return true; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) case DCN_VERSION_2_1: dal_hw_translate_dcn21_init(translate); return true; diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile new file mode 100644 index 000000000000..4170b6eb9ec0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile @@ -0,0 +1,28 @@ +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'hdcp' sub-component of DAL. +# + +HDCP_MSG = hdcp_msg.o + +AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG) diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c new file mode 100644 index 000000000000..6f730b5bfe42 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -0,0 +1,324 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
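The message tables in the new hdcp_msg.c below (hdcp_cmd_is_read, hdcp_i2c_offsets, hdcp_dpcd_addrs) all rely on C designated initializers keyed by enum value, so a message ID indexes straight into its transport parameters. A self-contained sketch of that pattern, with an illustrative enum and values rather than the driver's own:

#include <stdbool.h>
#include <stdint.h>

enum msg_id { MSG_READ_BKSV, MSG_WRITE_AKSV, MSG_ID_COUNT };

/* designated initializers: entries are keyed by enum, order-independent */
static const bool msg_is_read[MSG_ID_COUNT] = {
	[MSG_READ_BKSV]  = true,
	[MSG_WRITE_AKSV] = false,
};

static const uint8_t msg_i2c_offset[MSG_ID_COUNT] = {
	[MSG_READ_BKSV]  = 0x00, /* illustrative offsets */
	[MSG_WRITE_AKSV] = 0x10,
};

/* a lookup is then a plain array index */
static uint8_t offset_for(enum msg_id id)
{
	return msg_i2c_offset[id];
}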
+ * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" +#include "dm_helpers.h" +#include "include/hdcp_types.h" +#include "include/i2caux_interface.h" +#include "include/signal_types.h" +#include "core_types.h" +#include "dc_link_ddc.h" +#include "link_hwss.h" + +#define DC_LOGGER \ + link->ctx->logger +#define HDCP14_KSV_SIZE 5 +#define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE + +static const bool hdcp_cmd_is_read[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = true, + [HDCP_MESSAGE_ID_READ_RI_R0] = true, + [HDCP_MESSAGE_ID_READ_PJ] = true, + [HDCP_MESSAGE_ID_WRITE_AKSV] = false, + [HDCP_MESSAGE_ID_WRITE_AINFO] = false, + [HDCP_MESSAGE_ID_WRITE_AN] = false, + [HDCP_MESSAGE_ID_READ_VH_X] = true, + [HDCP_MESSAGE_ID_READ_VH_0] = true, + [HDCP_MESSAGE_ID_READ_VH_1] = true, + [HDCP_MESSAGE_ID_READ_VH_2] = true, + [HDCP_MESSAGE_ID_READ_VH_3] = true, + [HDCP_MESSAGE_ID_READ_VH_4] = true, + [HDCP_MESSAGE_ID_READ_BCAPS] = true, + [HDCP_MESSAGE_ID_READ_BSTATUS] = true, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true, + [HDCP_MESSAGE_ID_READ_BINFO] = true, + [HDCP_MESSAGE_ID_HDCP2VERSION] = true, + [HDCP_MESSAGE_ID_RX_CAPS] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = true, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [HDCP_MESSAGE_ID_READ_PJ] = 0xA, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, + [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70 +}; + +struct protection_properties { + bool supported; + bool (*process_transaction)( + struct dc_link *link, + struct hdcp_protection_message *message_info); +}; + +static const struct 
protection_properties non_supported_protection = { + .supported = false +}; + +static bool hdmi_14_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + uint8_t *buff = NULL; + bool result; + const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/ + const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/ + struct i2c_command i2c_command; + uint8_t offset = hdcp_i2c_offsets[message_info->msg_id]; + struct i2c_payload i2c_payloads[] = { + { true, 0, 1, &offset }, + /* actual hdcp payload, will be filled later, zeroed for now*/ + { 0 } + }; + + switch (message_info->link) { + case HDCP_LINK_SECONDARY: + i2c_payloads[0].address = hdcp_i2c_addr_link_secondary; + i2c_payloads[1].address = hdcp_i2c_addr_link_secondary; + break; + case HDCP_LINK_PRIMARY: + default: + i2c_payloads[0].address = hdcp_i2c_addr_link_primary; + i2c_payloads[1].address = hdcp_i2c_addr_link_primary; + break; + } + + if (hdcp_cmd_is_read[message_info->msg_id]) { + i2c_payloads[1].write = false; + i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads); + i2c_payloads[1].length = message_info->length; + i2c_payloads[1].data = message_info->data; + } else { + i2c_command.number_of_payloads = 1; + buff = kzalloc(message_info->length + 1, GFP_KERNEL); + + if (!buff) + return false; + + buff[0] = offset; + memmove(&buff[1], message_info->data, message_info->length); + i2c_payloads[0].length = message_info->length + 1; + i2c_payloads[0].data = buff; + } + + i2c_command.payloads = i2c_payloads; + i2c_command.engine = I2C_COMMAND_ENGINE_HW;//only HW + i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz; + + result = dm_helpers_submit_i2c( + link->ctx, + link, + &i2c_command); + kfree(buff); + + return result; +} + +static const struct protection_properties hdmi_14_protection = { + .supported = true, + .process_transaction = hdmi_14_process_transaction +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, + [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494 +}; + +static bool dpcd_access_helper( + struct dc_link 
*link, + uint32_t length, + uint8_t *data, + uint32_t dpcd_addr, + bool is_read) +{ + enum dc_status status; + uint32_t cur_length = 0; + uint32_t offset = 0; + uint32_t ksv_read_size = 0x6803b - 0x6802c; + + /* The KSV FIFO must be read repeatedly, in chunks */ + if (dpcd_addr == 0x6802c) { + if (length % HDCP14_KSV_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n", + __func__, + length, + HDCP14_KSV_SIZE); + } + if (length > HDCP14_MAX_KSV_FIFO_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n", + __func__, + length, + HDCP14_MAX_KSV_FIFO_SIZE); + } + + DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n", + __func__, + length / HDCP14_KSV_SIZE); + + while (length > 0) { + if (length > ksv_read_size) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + ksv_read_size); + + data += ksv_read_size; + length -= ksv_read_size; + } else { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + length); + + data += length; + length = 0; + } + + if (status != DC_OK) + return false; + } + } else { + while (length > 0) { + if (length > DEFAULT_AUX_MAX_DATA_SIZE) + cur_length = DEFAULT_AUX_MAX_DATA_SIZE; + else + cur_length = length; + + if (is_read) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } else { + status = core_link_write_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } + + if (status != DC_OK) + return false; + + length -= cur_length; + offset += cur_length; + } + } + return true; +} + +static bool dp_11_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + return dpcd_access_helper( + link, + message_info->length, + message_info->data, + hdcp_dpcd_addrs[message_info->msg_id], + hdcp_cmd_is_read[message_info->msg_id]); +} + +static const struct protection_properties dp_11_protection = { + .supported = true, + .process_transaction = dp_11_process_transaction +}; + diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index fd39e2abe2ed..4ead89dd7c41 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -43,10 +43,8 @@ enum dc_status { DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ DC_FAIL_SCALING = 14, DC_FAIL_DP_LINK_TRAINING = 15, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 DC_FAIL_DSC_VALIDATE = 16, DC_NO_DSC_RESOURCE = 17, -#endif DC_FAIL_UNSUPPORTED_1 = 18, DC_FAIL_CLK_EXCEED_MAX = 21, DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f189307750ab..16f6ef22367b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -33,13 +33,11 @@ #include "dc_bios_types.h" #include "mem_input.h" #include "hubp.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "mpc.h" #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dwb.h" #include "mcif_wb.h" -#endif #define MAX_CLOCK_SOURCES 7 @@ -52,7 +50,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, #include "clock_source.h" #include "audio.h" #include "dm_pp_smu.h" - +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif /************ link *****************/ struct link_init_data { @@ -87,9 +87,7 @@ void core_link_set_avmute(struct
pipe_ctx *pipe_ctx, bool enable); struct resource_pool; struct dc_state; struct resource_context; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) struct clk_bw_params; -#endif struct resource_funcs { void (*destroy)(struct resource_pool **pool); @@ -103,7 +101,7 @@ struct resource_funcs { int (*populate_dml_pipes)( struct dc *dc, - struct resource_context *res_ctx, + struct dc_state *context, display_e2e_pipe_params_st *pipes); enum dc_status (*validate_global)( @@ -133,7 +131,6 @@ struct resource_funcs { struct resource_context *res_ctx, const struct resource_pool *pool, struct dc_stream_state *stream); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*populate_dml_writeback_from_context)( struct dc *dc, struct resource_context *res_ctx, @@ -144,12 +141,9 @@ struct resource_funcs { struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) void (*update_bw_bounding_box)( struct dc *dc, struct clk_bw_params *bw_params); -#endif }; @@ -178,7 +172,6 @@ struct resource_pool { struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; bool i2c_hw_buffer_in_use; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dwbc *dwbc[MAX_DWB_PIPES]; struct mcif_wb *mcif_wb[MAX_DWB_PIPES]; struct { @@ -186,11 +179,8 @@ struct resource_pool { unsigned int gsl_1:1; unsigned int gsl_2:1; } gsl_groups; -#endif -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dscs[MAX_PIPES]; -#endif unsigned int pipe_count; unsigned int underlay_pipe_index; @@ -204,9 +194,7 @@ struct resource_pool { unsigned int timing_generator_count; unsigned int mpcc_count; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) unsigned int writeback_pipe_count; -#endif /* * reserved clock source for DP */ @@ -227,17 +215,18 @@ struct resource_pool { const struct resource_funcs *funcs; const struct resource_caps *res_cap; + + struct ddc_service *oem_device; }; struct dcn_fe_bandwidth { int dppclk_khz; + }; struct stream_resource { struct output_pixel_processor *opp; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct display_stream_compressor *dsc; -#endif struct timing_generator *tg; struct stream_encoder *stream_enc; struct audio *audio; @@ -246,12 +235,10 @@ struct stream_resource { struct encoder_info_frame encoder_info_frame; struct abm *abm; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* There are only (num_pipes+1)/2 groups. 
0 means unassigned, * otherwise it's using group number 'gsl_group-1' */ uint8_t gsl_group; -#endif }; struct plane_resource { @@ -303,17 +290,15 @@ struct pipe_ctx { struct pipe_ctx *next_odm_pipe; struct pipe_ctx *prev_odm_pipe; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct _vcs_dpi_display_dlg_regs_st dlg_regs; struct _vcs_dpi_display_ttu_regs_st ttu_regs; struct _vcs_dpi_display_rq_regs_st rq_regs; struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param; #endif union pipe_update_flags update_flags; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dwbc *dwbc; struct mcif_wb *mcif_wb; -#endif }; struct resource_context { @@ -322,9 +307,7 @@ struct resource_context { bool is_audio_acquired[MAX_PIPES]; uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES]; uint8_t dp_clock_source_ref_count; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool is_dsc_acquired[MAX_PIPES]; -#endif }; struct dce_bw_output { @@ -344,18 +327,14 @@ struct dce_bw_output { int blackout_recovery_time_us; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dcn_bw_writeback { struct mcif_arb_params mcif_wb_arb[MAX_DWB_PIPES]; }; -#endif struct dcn_bw_output { struct dc_clocks clk; struct dcn_watermark_set watermarks; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dcn_bw_writeback bw_writeback; -#endif }; union bw_output { @@ -389,16 +368,12 @@ struct dc_state { /* Note: these are big structures, do *not* put on stack! */ struct dm_pp_display_configuration pp_display_cfg; -#ifdef CONFIG_DRM_AMD_DC_DCN1_0 +#ifdef CONFIG_DRM_AMD_DC_DCN struct dcn_bw_internal_vars dcn_bw_vars; #endif struct clk_mgr *clk_mgr; - struct { - bool full_update_needed : 1; - } commit_hints; - struct kref refcount; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index b1fab251c09b..de2d160114db 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); +bool dal_ddc_submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload); + int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_channel_operation_result *operation_result); @@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); +uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout); + void dal_ddc_service_write_scdc_data( struct ddc_service *ddc_service, uint32_t pix_clk, diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 08a4df2c61a8..6198bccd6199 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,6 +28,8 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ struct dc_link; struct dc_stream_state; @@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries( struct dc_link_settings *known_limit_link_setting, int attempts); +bool dp_verify_mst_link_cap( + struct dc_link *link); + bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); @@ -52,10 +57,11 @@ void decide_link_settings( struct dc_link_settings *link_setting); bool perform_link_training_with_retries( - struct dc_link *link, const 
struct dc_link_settings *link_setting, bool skip_video_pattern, - int attempts); + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal); bool is_mst_supported(struct dc_link *link); @@ -70,13 +76,11 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable); enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); -#endif #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index e79cd4e92919..e77b3a76766d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -140,6 +140,9 @@ struct write_command_context { struct aux_engine_funcs { + bool (*configure_timeout)( + struct ddc_service *ddc, + uint32_t timeout); void (*destroy)( struct aux_engine **ptr); bool (*acquire_engine)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 76f9ad1b23df..ac530c057ddd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -31,7 +31,6 @@ #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Constants */ #define DDR4_DRAM_WIDTH 64 #define WM_A 0 @@ -39,15 +38,13 @@ #define WM_C 2 #define WM_D 3 #define WM_SET_COUNT 4 -#endif #define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000 -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Will these bw structures be ASIC specific? 
*/ -#define MAX_NUM_DPM_LVL 4 +#define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 @@ -69,6 +66,8 @@ struct wm_range_table_entry { unsigned int wm_inst; unsigned int wm_type; double pstate_latency_us; + double sr_exit_time_us; + double sr_enter_plus_exit_time_us; bool valid; }; @@ -152,7 +151,6 @@ struct clk_bw_params { struct clk_limit_table clk_table; struct wm_table wm_table; }; -#endif /* Public interfaces */ struct clk_states { @@ -180,16 +178,21 @@ struct clk_mgr_funcs { struct dc_state *context, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); + + bool (*are_clock_states_equal) (struct dc_clocks *a, + struct dc_clocks *b); + void (*notify_wm_ranges)(struct clk_mgr *clk_mgr); }; struct clk_mgr { struct dc_context *ctx; struct clk_mgr_funcs *funcs; struct dc_clocks clks; + bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes -#ifdef CONFIG_DRM_AMD_DC_DCN2_1 + int dentist_vco_freq_khz; + struct clk_state_registers_and_bypass boot_snapshot; struct clk_bw_params *bw_params; -#endif }; /* forward declarations */ @@ -199,4 +202,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr); +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + #endif /* __DAL_CLK_MGR_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 7dd46eb96d67..862952c0286a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -96,12 +96,10 @@ enum dentist_divider_range { .MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \ .MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67 -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK_REG_LIST_NV10() \ SR(DENTIST_DISPCLK_CNTL), \ CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \ CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0) -#endif #define CLK_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -120,7 +118,6 @@ enum dentist_divider_range { CLK_SF(MP1_SMN_C2PMSG_83, CONTENT, mask_sh),\ CLK_SF(MP1_SMN_C2PMSG_91, CONTENT, mask_sh), -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh) \ CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\ @@ -130,7 +127,6 @@ enum dentist_divider_range { CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh) -#endif #define CLK_REG_FIELD_LIST(type) \ type DPREFCLK_SRC_SEL; \ @@ -143,30 +139,24 @@ enum dentist_divider_range { ****************** Clock Manager Private Structures *********************************** *************************************************************************************** */ -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define CLK20_REG_FIELD_LIST(type) \ type DENTIST_DPPCLK_WDIVIDER; \ type DENTIST_DPPCLK_CHG_DONE; \ type FbMult_int; \ type FbMult_frac; -#endif #define VBIOS_SMU_REG_FIELD_LIST(type) \ type CONTENT; struct clk_mgr_shift { CLK_REG_FIELD_LIST(uint8_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 CLK20_REG_FIELD_LIST(uint8_t) -#endif VBIOS_SMU_REG_FIELD_LIST(uint32_t) }; struct clk_mgr_mask { CLK_REG_FIELD_LIST(uint32_t) -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 CLK20_REG_FIELD_LIST(uint32_t) -#endif 
VBIOS_SMU_REG_FIELD_LIST(uint32_t) }; @@ -174,16 +164,29 @@ struct clk_mgr_registers { uint32_t DPREFCLK_CNTL; uint32_t DENTIST_DISPCLK_CNTL; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 uint32_t CLK3_CLK2_DFS_CNTL; uint32_t CLK3_CLK_PLL_REQ; -#endif uint32_t MP1_SMN_C2PMSG_67; uint32_t MP1_SMN_C2PMSG_83; uint32_t MP1_SMN_C2PMSG_91; }; +enum clock_type { + clock_type_dispclk = 1, + clock_type_dcfclk, + clock_type_socclk, + clock_type_pixelclk, + clock_type_phyclk, + clock_type_dppclk, + clock_type_fclk, + clock_type_dcfdsclk, + clock_type_dscclk, + clock_type_uclk, + clock_type_dramclk, +}; + + struct state_dependent_clocks { int display_clk_khz; int pixel_clk_khz; @@ -210,8 +213,6 @@ struct clk_mgr_internal { struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; /*TODO: figure out which of the below fields should be here vs in asic specific portion */ - int dentist_vco_freq_khz; - /* Cache the status of DFS-bypass feature*/ bool dfs_bypass_enabled; /* True if the DFS-bypass feature is enabled and active. */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index d8e744f366e5..05ee5295d2c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -38,8 +38,7 @@ struct dccg { struct dccg_funcs { void (*update_dpp_dto)(struct dccg *dccg, int dpp_inst, - int req_dppclk, - bool reduce_divider_only); + int req_dppclk); void (*get_dccg_ref_freq)(struct dccg *dccg, unsigned int xtalin_freq_inKhz, unsigned int *dccg_ref_freq_inKhz); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index a6297219d7fc..c0dc1d0f5cae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -52,7 +52,6 @@ struct dcn_hubbub_wm { struct dcn_hubbub_wm_set sets[4]; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dcn_hubbub_page_table_depth { DCN_PAGE_TABLE_DEPTH_1_LEVEL, DCN_PAGE_TABLE_DEPTH_2_LEVEL, @@ -101,13 +100,11 @@ struct hubbub_addr_config { } default_addrs; }; -#endif struct hubbub_funcs { void (*update_dchub)( struct hubbub *hubbub, struct dchub_init_data *dh_data); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int (*init_dchub_sys_ctx)( struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); @@ -116,7 +113,6 @@ struct hubbub_funcs { struct dcn_hubbub_virt_addr_config *va_config, int vmid); -#endif bool (*get_dcc_compression_cap)(struct hubbub *hubbub, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output); @@ -147,6 +143,7 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index c68f0ce346c7..5315f1f86b21 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -52,6 +52,8 @@ struct dmcu { enum dmcu_state dmcu_state; struct dmcu_version dmcu_version; unsigned int cached_wait_loop_number; + uint32_t psp_version; + bool auto_load_dmcu; }; struct dmcu_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 474c7194a9f8..125e42dbd3c5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -36,14 +36,10 @@ struct 
dpp { struct dpp_caps *caps; struct pwl_params regamma_params; struct pwl_params degamma_params; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dpp_cursor_attributes cur_attr; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params shaper_params; bool cm_bypass_mode; -#endif }; struct dpp_input_csc_matrix { @@ -56,7 +52,6 @@ struct dpp_grph_csc_adjustment { enum graphics_gamut_adjust_type gamut_adjust_type; }; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct cnv_color_keyer_params { int color_keyer_en; int color_keyer_mode; @@ -82,7 +77,6 @@ struct cnv_alpha_2bit_lut { int lut2; int lut3; }; -#endif struct dcn_dpp_state { uint32_t is_enabled; @@ -190,12 +184,8 @@ struct dpp_funcs { enum surface_pixel_format format, enum expansion_mode mode, struct dc_csc_transform input_csc_color_matrix, -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 enum dc_color_space input_color_space, struct cnv_alpha_2bit_lut *alpha_2bit_lut); -#else - enum dc_color_space input_color_space); -#endif void (*dpp_full_bypass)(struct dpp *dpp_base); @@ -224,7 +214,6 @@ struct dpp_funcs { bool dppclk_div, bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*dpp_program_blnd_lut)( struct dpp *dpp, const struct pwl_params *params); @@ -237,7 +226,6 @@ struct dpp_funcs { void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 1ddb1c6fa149..c59740084ebc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -22,13 +22,16 @@ * Authors: AMD * */ -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #ifndef __DAL_DSC_H__ #define __DAL_DSC_H__ #include "dc_dsc.h" #include "dc_hw_types.h" -#include "dc_dp_types.h" +#include "dc_types.h" +/* do not include any other headers + * or else it might break Edid Utility functionality. + */ + /* Input parameters for configuring DSC from the outside of DSC */ struct dsc_config { @@ -81,12 +84,6 @@ struct dsc_enc_caps { uint32_t bpp_increment_div; /* bpp increment divisor, e.g. 
if 16, it's 1/16th of a bit */ }; -struct display_stream_compressor { - const struct dsc_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); @@ -100,4 +97,3 @@ struct dsc_funcs { }; #endif -#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index ff1a07b35c85..735f41901b88 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -51,20 +51,15 @@ enum dwb_source { dwb_src_otg3, /* for DCN1.x/DCN2.x */ }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* DCN1.x, DCN2.x support 2 pipes */ -#else -/* DCN1.x supports 2 pipes */ -#endif enum dwb_pipe { dwb_pipe0 = 0, -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) dwb_pipe1, #endif dwb_pipe_max_num, }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum dwb_frame_capture_enable { DWB_FRAME_CAPTURE_DISABLE = 0, DWB_FRAME_CAPTURE_ENABLE = 1, @@ -77,9 +72,7 @@ enum wbscl_coef_filter_type_sel { WBSCL_COEF_CHROMA_HORZ_FILTER = 3 }; -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dwb_warmup_params { bool warmup_en; /* false: normal mode, true: enable pattern generator */ bool warmup_mode; /* false: 420, true: 444 */ @@ -88,7 +81,6 @@ struct dwb_warmup_params { int warmup_width; /* Pattern width (pixels) */ int warmup_height; /* Pattern height (lines) */ }; -#endif struct dwb_caps { enum dce_version hw_version; /* DCN engine version. */ @@ -150,13 +142,11 @@ struct dwbc_funcs { struct dwbc *dwbc, bool is_new_content); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_warmup)( struct dwbc *dwbc, struct dwb_warmup_params *warmup_params); -#endif bool (*get_dwb_status)( struct dwbc *dwbc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 809b62b51a43..85a34dde8526 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -38,9 +38,7 @@ enum cursor_pitch { }; enum cursor_lines_per_chunk { -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) CURSOR_LINE_PER_CHUNK_1 = 0, /* new for DCN2 */ -#endif CURSOR_LINE_PER_CHUNK_2 = 1, CURSOR_LINE_PER_CHUNK_4, CURSOR_LINE_PER_CHUNK_8, @@ -84,7 +82,9 @@ struct hubp_funcs { void (*mem_program_viewport)( struct hubp *hubp, const struct rect *viewport, - const struct rect *viewport_c); + const struct rect *viewport_c, + enum dc_rotation_angle rotation); + /* rotation needed for Renoir workaround */ bool (*hubp_program_surface_flip_and_addr)( struct hubp *hubp, @@ -139,7 +139,6 @@ struct hubp_funcs { unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); void (*hubp_init)(struct hubp *hubp); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*dmdata_set_attributes)( struct hubp *hubp, const struct dc_dmdata_attributes *attr); @@ -159,7 +158,13 @@ struct hubp_funcs { void (*hubp_set_flip_control_surface_gsl)( struct hubp *hubp, bool enable); -#endif + + void (*validate_dml_output)( + struct hubp *hubp, + struct dc_context *ctx, + struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, + struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *dml_ttu_attr); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index f82365e2d03c..75d419081e76 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -36,9 +36,7 @@ #define MAX_AUDIOS 7 #define MAX_PIPES 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 -#endif struct gamma_curve { uint32_t offset; @@ -81,7 +79,6 @@ struct pwl_result_data { uint32_t delta_blue_reg; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_rgb { uint32_t red; uint32_t green; @@ -110,7 +107,6 @@ struct tetrahedral_params { bool use_12bits; }; -#endif /* arr_curve_points - regamma regions/segments specification * arr_points - beginning and end point specified separately (only one on DCE) @@ -195,13 +191,11 @@ enum opp_regamma { OPP_REGAMMA_USER }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT enum optc_dsc_mode { OPTC_DSC_DISABLED = 0, OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */ OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */ }; -#endif struct dc_bias_and_scale { uint16_t scale_red; @@ -224,12 +218,8 @@ enum test_pattern_mode { TEST_PATTERN_MODE_VERTICALBARS, TEST_PATTERN_MODE_HORIZONTALBARS, TEST_PATTERN_MODE_SINGLERAMP_RGB, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) TEST_PATTERN_MODE_DUALRAMP_RGB, TEST_PATTERN_MODE_XR_BIAS_RGB -#else - TEST_PATTERN_MODE_DUALRAMP_RGB -#endif }; enum test_pattern_color_format { @@ -255,6 +245,13 @@ enum controller_dp_test_pattern { CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR }; +enum controller_dp_color_space { + CONTROLLER_DP_COLOR_SPACE_RGB, + CONTROLLER_DP_COLOR_SPACE_YCBCR601, + CONTROLLER_DP_COLOR_SPACE_YCBCR709, + CONTROLLER_DP_COLOR_SPACE_UDEFINED +}; + enum dc_lut_mode { LUT_BYPASS, LUT_RAM_A, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index abb4e4237fb6..fb748f082c56 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -113,26 +113,21 @@ struct link_encoder { struct encoder_feature_support features; enum transmitter transmitter; enum hpd_source_id hpd_source; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool usbc_combo_phy; -#endif }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct link_enc_state { uint32_t dphy_fec_en; uint32_t dphy_fec_ready_shadow; uint32_t dphy_fec_active_status; + uint32_t dp_link_training_complete; }; -#endif struct link_encoder_funcs { -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*read_state)( struct link_encoder *enc, struct link_enc_state *s); -#endif bool (*validate_output_with_stream)( struct link_encoder *enc, const struct dc_stream_state *stream); void (*hw_init)(struct link_encoder *enc); @@ -174,7 +169,6 @@ struct link_encoder_funcs { unsigned int (*get_dig_frontend)(struct link_encoder *enc); void (*destroy)(struct link_encoder **enc); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*fec_set_enable)(struct link_encoder *enc, bool enable); @@ -182,8 +176,11 @@ struct link_encoder_funcs { bool ready); bool (*fec_is_active)(struct link_encoder *enc); -#endif bool (*is_in_alt_mode) (struct link_encoder *enc); + + void (*get_max_link_cap)(struct link_encoder *enc, + struct dc_link_settings *link_settings); + enum signal_type (*get_dig_mode)( struct link_encoder *enc); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index e8668388581b..2e2310f1901a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -40,10 +40,9 @@ struct cstate_pstate_watermarks_st { struct dcn_watermarks { uint32_t pte_meta_urgent_ns; 
uint32_t urgent_ns; -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_flip; -#endif + int32_t urgent_latency_ns; struct cstate_pstate_watermarks_st cstate_pstate; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 58826be81395..094afc4c8173 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -31,9 +31,7 @@ #define MAX_MPCC 6 #define MAX_OPP 6 -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB 1 -#endif enum mpc_output_csc_mode { MPC_OUTPUT_CSC_DISABLE = 0, @@ -66,14 +64,12 @@ struct mpcc_blnd_cfg { int global_alpha; bool overlap_only; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) /* MPCC top/bottom gain settings */ int bottom_gain_mode; int background_color_bpc; int top_gain; int bottom_inside_gain; int bottom_outside_gain; -#endif }; struct mpcc_sm_cfg { @@ -90,7 +86,6 @@ struct mpcc_sm_cfg { int force_next_field_polarity; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct mpc_denorm_clamp { int clamp_max_r_cr; int clamp_min_r_cr; @@ -99,7 +94,6 @@ struct mpc_denorm_clamp { int clamp_max_b_cb; int clamp_min_b_cb; }; -#endif /* * MPCC connection and blending configuration for a single MPCC instance. @@ -126,10 +120,8 @@ struct mpc { struct dc_context *ctx; struct mpcc mpcc_array[MAX_MPCC]; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params blender_params; bool cm_bypass_mode; -#endif }; struct mpcc_state { @@ -230,7 +222,6 @@ struct mpc_funcs { struct mpc *mpc, struct mpc_tree *tree); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_denorm)(struct mpc *mpc, int opp_id, enum dc_color_depth output_depth); @@ -258,7 +249,6 @@ struct mpc_funcs { struct mpc *mpc, int mpcc_id, bool power_on); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 957e9047381a..7575564b2265 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -208,6 +208,7 @@ struct output_pixel_processor { struct mpc_tree mpc_tree_params; bool mpcc_disconnect_pending[MAX_PIPES]; const struct opp_funcs *funcs; + uint32_t dyn_expansion; }; enum fmt_stereo_action { @@ -262,9 +263,7 @@ struct oppbuf_params { enum oppbuf_display_segmentation mso_segmentation; uint32_t mso_overlap_pixel_num; uint32_t pixel_repetition; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) uint32_t num_segment_padded_pixels; -#endif }; struct opp_funcs { @@ -304,10 +303,10 @@ struct opp_funcs { struct output_pixel_processor *opp, bool enable); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*opp_set_disp_pattern_generator)( struct output_pixel_processor *opp, enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, @@ -323,7 +322,6 @@ struct opp_funcs { void (*opp_program_left_edge_extra_pixel)( struct output_pixel_processor *opp, bool count); -#endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe9b7a10a1c3..351b387ad606 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -65,13 +65,11 @@ struct audio_clock_info { uint32_t cts_48khz; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) enum dynamic_metadata_mode { dmdata_dp, dmdata_hdmi, dmdata_dolby_vision }; -#endif struct encoder_info_frame { /* auxiliary video information */ 
@@ -90,9 +88,7 @@ struct encoder_info_frame { struct encoder_unblank_param { struct dc_link_settings link_settings; struct dc_crtc_timing timing; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int opp_cnt; -#endif }; struct encoder_set_dp_phy_pattern_param { @@ -109,7 +105,6 @@ struct stream_encoder { enum engine_id id; }; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct enc_state { uint32_t dsc_mode; // DISABLED 0; 1 or 2 indicate enabled state. uint32_t dsc_slice_width; @@ -119,13 +114,13 @@ struct enc_state { uint32_t sec_gsp_pps_enable; uint32_t sec_stream_enable; }; -#endif struct stream_encoder_funcs { void (*dp_set_stream_attribute)( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting); void (*hdmi_set_stream_attribute)( @@ -214,8 +209,11 @@ struct stream_encoder_funcs { unsigned int (*dig_source_otg)( struct stream_encoder *enc); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + bool (*dp_get_pixel_format)( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s); void (*dp_set_dsc_config)( @@ -227,7 +225,6 @@ struct stream_encoder_funcs { void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps); -#endif void (*set_dynamic_metadata)(struct stream_encoder *enc, bool enable, @@ -237,7 +234,6 @@ struct stream_encoder_funcs { void (*dp_set_odm_combine)( struct stream_encoder *enc, bool odm_combine); -#endif }; #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 6196cc32356e..2d3efd71fa51 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -195,10 +195,8 @@ struct timing_generator_funcs { void (*lock)(struct timing_generator *tg); void (*lock_doublebuffer_disable)(struct timing_generator *tg); void (*lock_doublebuffer_enable)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void(*triplebuffer_unlock)(struct timing_generator *tg); void(*triplebuffer_lock)(struct timing_generator *tg); -#endif void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst); void (*enable_crtc_reset)(struct timing_generator *tg, @@ -235,7 +233,6 @@ struct timing_generator_funcs { bool (*is_optc_underflow_occurred)(struct timing_generator *tg); void (*clear_optc_underflow)(struct timing_generator *tg); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) void (*set_dwb_source)(struct timing_generator *optc, uint32_t dwb_pipe_inst); @@ -243,7 +240,6 @@ struct timing_generator_funcs { uint32_t *num_of_input_segments, uint32_t *seg0_src_sel, uint32_t *seg1_src_sel); -#endif /** * Configure CRCs for the given timing generator. 
Return false if TG is @@ -261,17 +257,16 @@ struct timing_generator_funcs { void (*program_manual_trigger)(struct timing_generator *optc); void (*setup_manual_trigger)(struct timing_generator *optc); + bool (*get_hw_timing)(struct timing_generator *optc, + struct dc_crtc_timing *hw_crtc_timing); void (*set_vtg_params)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*set_dsc_config)(struct timing_generator *optc, enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); -#endif void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing); @@ -279,7 +274,6 @@ struct timing_generator_funcs { void (*set_gsl_source_select)(struct timing_generator *optc, int group_idx, uint32_t gsl_ready_signal); -#endif }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3a938cd414ea..e9c6021a5372 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,310 +32,157 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" -enum pipe_gating_control { - PIPE_GATING_CONTROL_DISABLE = 0, - PIPE_GATING_CONTROL_ENABLE, - PIPE_GATING_CONTROL_INIT -}; - enum vline_select { VLINE0, VLINE1 }; -struct dce_hwseq_wa { - bool blnd_crtc_trigger; - bool DEGVIDCN10_253; - bool false_optc_underflow; - bool DEGVIDCN10_254; - bool DEGVIDCN21; -}; - -struct hwseq_wa_state { - bool DEGVIDCN10_253_applied; -}; - -struct dce_hwseq { - struct dc_context *ctx; - const struct dce_hwseq_registers *regs; - const struct dce_hwseq_shift *shifts; - const struct dce_hwseq_mask *masks; - struct dce_hwseq_wa wa; - struct hwseq_wa_state wa_state; -}; - struct pipe_ctx; struct dc_state; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_stream_status; struct dc_writeback_info; -#endif struct dchub_init_data; struct dc_static_screen_events; struct resource_pool; -struct resource_context; -struct stream_resource; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config; struct dc_virtual_addr_space_config; -#endif -struct hubp; struct dpp; +struct dce_hwseq; struct hw_sequencer_funcs { + /* Embedded Display Related */ + void (*edp_power_control)(struct dc_link *link, bool enable); + void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); - void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); - + /* Pipe Programming Related */ void (*init_hw)(struct dc *dc); - - void (*init_pipes)(struct dc *dc, struct dc_state *context); - - enum dc_status (*apply_ctx_to_hw)( - struct dc *dc, struct dc_state *context); - - void (*reset_hw_ctx_wrap)( - struct dc *dc, struct dc_state *context); - - void (*apply_ctx_for_surface)( - struct dc *dc, + void (*enable_accelerated_mode)(struct dc *dc, + struct dc_state *context); + enum dc_status (*apply_ctx_to_hw)(struct dc *dc, + struct dc_state *context); + void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, - int num_planes, + int num_planes, struct dc_state *context); + void (*program_front_end_for_ctx)(struct dc *dc, struct dc_state *context); - - void (*program_gamut_remap)( + void 
(*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*program_output_csc)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum dc_color_space colorspace, - uint16_t *matrix, - int opp_id); - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - void (*program_triplebuffer)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool enableTripleBuffer); - void (*set_flip_control_gsl)( - struct pipe_ctx *pipe_ctx, - bool flip_immediate); -#endif - - void (*update_plane_addr)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*plane_atomic_disconnect)( - struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*update_dchub)( - struct dce_hwseq *hws, - struct dchub_init_data *dh_data); - -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 - int (*init_sys_ctx)( - struct dce_hwseq *hws, - struct dc *dc, - struct dc_phy_addr_space_config *pa_config); - void (*init_vm_ctx)( - struct dce_hwseq *hws, - struct dc *dc, - struct dc_virtual_addr_space_config *va_config, - int vmid); -#endif - void (*update_mpcc)( - struct dc *dc, - struct pipe_ctx *pipe_ctx); - - void (*update_pending_status)( + void (*update_dchub)(struct dce_hwseq *hws, + struct dchub_init_data *dh_data); + void (*wait_for_mpcc_disconnect)(struct dc *dc, + struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); - - bool (*set_input_transfer_func)( - struct pipe_ctx *pipe_ctx, - const struct dc_plane_state *plane_state); - - bool (*set_output_transfer_func)( - struct pipe_ctx *pipe_ctx, - const struct dc_stream_state *stream); - - void (*power_down)(struct dc *dc); - - void (*enable_accelerated_mode)(struct dc *dc, struct dc_state *context); - - void (*enable_timing_synchronization)( - struct dc *dc, - int group_index, - int group_size, - struct pipe_ctx *grouped_pipes[]); - - void (*enable_per_frame_crtc_position_reset)( - struct dc *dc, - int group_size, + void (*program_triplebuffer)(const struct dc *dc, + struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); + void (*update_pending_status)(struct pipe_ctx *pipe_ctx); + + /* Pipe Lock Related */ + void (*pipe_control_lock_global)(struct dc *dc, + struct pipe_ctx *pipe, bool lock); + void (*pipe_control_lock)(struct dc *dc, + struct pipe_ctx *pipe, bool lock); + void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, + bool flip_immediate); + + /* Timing Related */ + void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, + struct crtc_position *position); + int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx); + void (*enable_per_frame_crtc_position_reset)(struct dc *dc, + int group_size, struct pipe_ctx *grouped_pipes[]); + void (*enable_timing_synchronization)(struct dc *dc, + int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); + void (*setup_periodic_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum vline_select vline); + void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, + unsigned int vmin, unsigned int vmax, + unsigned int vmid, unsigned int vmid_frame_number); + void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, + int num_pipes, + const struct dc_static_screen_events *events); - void (*enable_display_pipe_clock_gating)( - struct dc_context *ctx, - bool clock_gating); - - bool (*enable_display_power_gating)( - struct dc *dc, - uint8_t controller_id, - struct dc_bios *dcb, - enum pipe_gating_control power_gating); - - void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*update_info_frame)(struct pipe_ctx *pipe_ctx); - - void (*send_immediate_sdp_message)( - struct pipe_ctx 
*pipe_ctx, - const uint8_t *custom_sdp_message, - unsigned int sdp_message_size); - + /* Stream Related */ void (*enable_stream)(struct pipe_ctx *pipe_ctx); - void (*disable_stream)(struct pipe_ctx *pipe_ctx); - + void (*blank_stream)(struct pipe_ctx *pipe_ctx); void (*unblank_stream)(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); - void (*blank_stream)(struct pipe_ctx *pipe_ctx); - - void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); + /* Bandwidth Related */ + void (*prepare_bandwidth)(struct dc *dc, struct dc_state *context); + bool (*update_bandwidth)(struct dc *dc, struct dc_state *context); + void (*optimize_bandwidth)(struct dc *dc, struct dc_state *context); - void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); - - void (*pipe_control_lock)( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); - - void (*pipe_control_lock_global)( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); - void (*blank_pixel_data)( - struct dc *dc, + /* Infopacket Related */ + void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); + void (*send_immediate_sdp_message)( struct pipe_ctx *pipe_ctx, - bool blank); - - void (*prepare_bandwidth)( - struct dc *dc, - struct dc_state *context); - void (*optimize_bandwidth)( - struct dc *dc, - struct dc_state *context); - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - bool (*update_bandwidth)( - struct dc *dc, - struct dc_state *context); + const uint8_t *custom_sdp_message, + unsigned int sdp_message_size); + void (*update_info_frame)(struct pipe_ctx *pipe_ctx); + void (*set_dmdata_attributes)(struct pipe_ctx *pipe); void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx); bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx); -#endif - - void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, - unsigned int vmin, unsigned int vmax, - unsigned int vmid, unsigned int vmid_frame_number); - - void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes, - struct crtc_position *position); - - void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, - int num_pipes, const struct dc_static_screen_events *events); - - enum dc_status (*enable_stream_timing)( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, - struct dc *dc); - - void (*setup_stereo)( - struct pipe_ctx *pipe_ctx, - struct dc *dc); - - void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); - - void (*log_hw_state)(struct dc *dc, - struct dc_log_buffer_ctx *log_ctx); - void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); - void (*clear_status_bits)(struct dc *dc, unsigned int mask); - - void (*wait_for_mpcc_disconnect)(struct dc *dc, - struct resource_pool *res_pool, - struct pipe_ctx *pipe_ctx); - - void (*edp_power_control)( - struct dc_link *link, - bool enable); - void (*edp_backlight_control)( - struct dc_link *link, - bool enable); - void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); + /* Cursor Related */ void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); - void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline); - void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx); - bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*init_blank)(struct dc *dc, struct timing_generator *tg); - void (*disable_vga)(struct dce_hwseq *hws); - void (*bios_golden_init)(struct dc *dc); - void (*plane_atomic_power_down)(struct dc *dc, - 
struct dpp *dpp, - struct hubp *hubp); - - void (*plane_atomic_disable)( - struct dc *dc, struct pipe_ctx *pipe_ctx); - - void (*enable_power_gating_plane)( - struct dce_hwseq *hws, - bool enable); - - void (*dpp_pg_control)( - struct dce_hwseq *hws, - unsigned int dpp_inst, - bool power_on); - - void (*hubp_pg_control)( - struct dce_hwseq *hws, - unsigned int hubp_inst, - bool power_on); - - void (*dsc_pg_control)( - struct dce_hwseq *hws, - unsigned int dsc_inst, - bool power_on); - + /* Colour Related */ + void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); + void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, int opp_id); -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); - void (*program_all_writeback_pipes_in_tree)( + /* VM Related */ + int (*init_sys_ctx)(struct dce_hwseq *hws, struct dc *dc, - const struct dc_stream_state *stream, - struct dc_state *context); + struct dc_phy_addr_space_config *pa_config); + void (*init_vm_ctx)(struct dce_hwseq *hws, + struct dc *dc, + struct dc_virtual_addr_space_config *va_config, + int vmid); + + /* Writeback Related */ void (*update_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*enable_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); -#endif - enum dc_status (*set_clock)(struct dc *dc, - enum dc_clock_type clock_type, - uint32_t clk_khz, - uint32_t stepping); - void (*get_clock)(struct dc *dc, + /* Clock Related */ + enum dc_status (*set_clock)(struct dc *dc, enum dc_clock_type clock_type, + uint32_t clk_khz, uint32_t stepping); + void (*get_clock)(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); + void (*optimize_pwr_state)(const struct dc *dc, + struct dc_state *context); + void (*exit_optimized_pwr_state)(const struct dc *dc, + struct dc_state *context); + + /* Audio Related */ + void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx); + void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx); + + /* Stereo 3D Related */ + void (*setup_stereo)(struct pipe_ctx *pipe_ctx, struct dc *dc); + + /* HW State Logging Related */ + void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); + void (*get_hw_state)(struct dc *dc, char *pBuf, + unsigned int bufSize, unsigned int mask); + void (*clear_status_bits)(struct dc *dc, unsigned int mask); + }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h new file mode 100644 index 000000000000..8ba06f015975 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -0,0 +1,156 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HW_SEQUENCER_PRIVATE_H__ +#define __DC_HW_SEQUENCER_PRIVATE_H__ + +#include "dc_types.h" + +enum pipe_gating_control { + PIPE_GATING_CONTROL_DISABLE = 0, + PIPE_GATING_CONTROL_ENABLE, + PIPE_GATING_CONTROL_INIT +}; + +struct dce_hwseq_wa { + bool blnd_crtc_trigger; + bool DEGVIDCN10_253; + bool false_optc_underflow; + bool DEGVIDCN10_254; + bool DEGVIDCN21; +}; + +struct hwseq_wa_state { + bool DEGVIDCN10_253_applied; +}; + +struct pipe_ctx; +struct dc_state; +struct dc_stream_status; +struct dc_writeback_info; +struct dchub_init_data; +struct dc_static_screen_events; +struct resource_pool; +struct resource_context; +struct stream_resource; +struct dc_phy_addr_space_config; +struct dc_virtual_addr_space_config; +struct hubp; +struct dpp; +struct dce_hwseq; +struct timing_generator; +struct tg_color; +struct output_pixel_processor; + +struct hwseq_private_funcs { + + void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_pipes)(struct dc *dc, struct dc_state *context); + void (*reset_hw_ctx_wrap)(struct dc *dc, struct dc_state *context); + void (*update_plane_addr)(const struct dc *dc, + struct pipe_ctx *pipe_ctx); + void (*plane_atomic_disconnect)(struct dc *dc, + struct pipe_ctx *pipe_ctx); + void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); + bool (*set_input_transfer_func)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_plane_state *plane_state); + bool (*set_output_transfer_func)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + const struct dc_stream_state *stream); + void (*power_down)(struct dc *dc); + void (*enable_display_pipe_clock_gating)(struct dc_context *ctx, + bool clock_gating); + bool (*enable_display_power_gating)(struct dc *dc, + uint8_t controller_id, + struct dc_bios *dcb, + enum pipe_gating_control power_gating); + void (*blank_pixel_data)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); + enum dc_status (*enable_stream_timing)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); + void (*edp_backlight_control)(struct dc_link *link, + bool enable); + void (*setup_vupdate_interrupt)(struct dc *dc, + struct pipe_ctx *pipe_ctx); + bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_blank)(struct dc *dc, struct timing_generator *tg); + void 
(*disable_vga)(struct dce_hwseq *hws);
+	void (*bios_golden_init)(struct dc *dc);
+	void (*plane_atomic_power_down)(struct dc *dc,
+			struct dpp *dpp,
+			struct hubp *hubp);
+	void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+	void (*enable_power_gating_plane)(struct dce_hwseq *hws,
+		bool enable);
+	void (*dpp_pg_control)(struct dce_hwseq *hws,
+			unsigned int dpp_inst,
+			bool power_on);
+	void (*hubp_pg_control)(struct dce_hwseq *hws,
+			unsigned int hubp_inst,
+			bool power_on);
+	void (*dsc_pg_control)(struct dce_hwseq *hws,
+			unsigned int dsc_inst,
+			bool power_on);
+	void (*update_odm)(struct dc *dc, struct dc_state *context,
+			struct pipe_ctx *pipe_ctx);
+	void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
+			const struct dc_stream_state *stream,
+			struct dc_state *context);
+	bool (*s0i3_golden_init_wa)(struct dc *dc);
+	void (*get_surface_visual_confirm_color)(
+			const struct pipe_ctx *pipe_ctx,
+			struct tg_color *color);
+	void (*get_hdr_visual_confirm_color)(struct pipe_ctx *pipe_ctx,
+			struct tg_color *color);
+	void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+	void (*verify_allow_pstate_change_high)(struct dc *dc);
+	void (*program_pipe)(struct dc *dc,
+			struct pipe_ctx *pipe_ctx,
+			struct dc_state *context);
+	bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
+	void (*dccg_init)(struct dce_hwseq *hws);
+	bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state);
+	bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
+			const struct dc_plane_state *plane_state);
+};
+
+struct dce_hwseq {
+	struct dc_context *ctx;
+	const struct dce_hwseq_registers *regs;
+	const struct dce_hwseq_shift *shifts;
+	const struct dce_hwseq_mask *masks;
+	struct dce_hwseq_wa wa;
+	struct hwseq_wa_state wa_state;
+	struct hwseq_private_funcs funcs;
+
+};
+
+#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 4eff5d38a2f9..9af7ee5bc8ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -60,11 +60,13 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
 
 bool dp_set_hw_training_pattern(
 	struct dc_link *link,
-	enum dc_dp_training_pattern pattern);
+	enum dc_dp_training_pattern pattern,
+	uint32_t offset);
 
 void dp_set_hw_lane_settings(
 	struct dc_link *link,
-	const struct link_training_settings *link_settings);
+	const struct link_training_settings *link_settings,
+	uint32_t offset);
 
 void dp_set_hw_test_pattern(
 	struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 8503d9cc4763..47e307388581 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -485,4 +485,23 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
 		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
 		...);
 
+/* register offload macros
+ *
+ * Instead of performing MMIO to a register directly, in some cases we want
+ * to gather a register sequence and execute it from another thread, to
+ * optimize the time required for lengthy operations.
+ */
+
+/* start gathering register sequence */
+#define REG_SEQ_START() \
+	reg_sequence_start_gather(CTX)
+
+/* start execution of register sequence gathered since REG_SEQ_START */
+#define REG_SEQ_SUBMIT() \
+	reg_sequence_start_execute(CTX)
+
+/* wait for the last
REG_SEQ_SUBMIT to finish */ +#define REG_SEQ_WAIT_DONE() \ + reg_sequence_wait_done(CTX) + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index bef224bf803e..7a85abc53d05 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -46,12 +46,8 @@ struct resource_caps { int num_pll; int num_dwb; int num_ddc; -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 int num_vmid; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT int num_dsc; -#endif -#endif }; struct resource_straps { diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index ea75420fc876..0f682ac53bb2 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -60,27 +60,23 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12) ############################################################################### # DCN 1x ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 +ifdef CONFIG_DRM_AMD_DC_DCN IRQ_DCN1 = irq_service_dcn10.o AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1) -endif ############################################################################### # DCN 20 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_0 IRQ_DCN2 = irq_service_dcn20.o AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2) -endif ############################################################################### # DCN 21 ############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN2_1 IRQ_DCN21 = irq_service_dcn21.o AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 1a581c464345..378cc11aa047 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -204,7 +204,7 @@ bool dce110_vblank_set(struct irq_service *irq_service, bool enable) { struct dc_context *dc_ctx = irq_service->ctx; - struct dc *core_dc = irq_service->ctx->dc; + struct dc *dc = irq_service->ctx->dc; enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(irq_service->ctx->dc, info->src_id, @@ -212,7 +212,7 @@ bool dce110_vblank_set(struct irq_service *irq_service, uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK; struct timing_generator *tg = - core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; + dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; if (enable) { if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) { @@ -403,7 +403,7 @@ static const struct irq_service_funcs irq_service_funcs_dce110 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct(struct irq_service *irq_service, +static void dce110_irq_construct(struct irq_service *irq_service, struct irq_service_init_data *init_data) { dal_irq_service_construct(irq_service, init_data); @@ -421,6 +421,6 @@ dal_irq_service_dce110_create(struct irq_service_init_data *init_data) if (!irq_service) return NULL; - construct(irq_service, init_data); + dce110_irq_construct(irq_service, init_data); return irq_service; } diff 
--git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 15380336cb51..2fe4703395f3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c @@ -273,7 +273,7 @@ static const struct irq_service_funcs irq_service_funcs_dce120 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct( +static void dce120_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -292,6 +292,6 @@ struct irq_service *dal_irq_service_dce120_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dce120_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 281fee8ad1e5..17e426b80a00 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -283,7 +283,7 @@ static const struct irq_service_funcs irq_service_funcs_dce80 = { .to_dal_irq_source = to_dal_irq_source_dce110 }; -static void construct( +static void dce80_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -302,7 +302,7 @@ struct irq_service *dal_irq_service_dce80_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dce80_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index cc8e7dedccce..f956b3bde680 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -355,7 +355,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn10 = { .to_dal_irq_source = to_dal_irq_source_dcn10 }; -static void construct( +static void dcn10_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -374,6 +374,6 @@ struct irq_service *dal_irq_service_dcn10_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn10_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 5db29bf582d3..2a1fea501f8c 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -359,7 +359,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn20 = { .to_dal_irq_source = to_dal_irq_source_dcn20 }; -static void construct( +static void dcn20_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -378,6 +378,6 @@ struct irq_service *dal_irq_service_dcn20_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn20_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index cbe7818529bb..1b971265418b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -350,7 +350,7 @@ static const struct irq_service_funcs irq_service_funcs_dcn21 = { 
.to_dal_irq_source = to_dal_irq_source_dcn21 }; -static void construct( +static void dcn21_irq_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data) { @@ -369,6 +369,6 @@ struct irq_service *dal_irq_service_dcn21_create( if (!irq_service) return NULL; - construct(irq_service, init_data); + dcn21_irq_construct(irq_service, init_data); return irq_service; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 0878550a8178..33053b9fe6bd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -38,7 +38,7 @@ #include "dce120/irq_service_dce120.h" -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dcn10/irq_service_dcn10.h" #endif diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 30ec80ac6fc8..13b9a9bb32c8 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -29,6 +29,7 @@ #include <linux/kgdb.h> #include <linux/kref.h> #include <linux/types.h> +#include <linux/slab.h> #include <asm/byteorder.h> @@ -48,7 +49,7 @@ #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__) -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) +#if defined(CONFIG_DRM_AMD_DC_DCN) #include <asm/fpu/api.h> #endif diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index ff664bdb1482..b8040da94b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -32,6 +32,7 @@ static void virtual_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, uint32_t enable_sdp_splitting) {} static void virtual_stream_encoder_hdmi_set_stream_attribute( @@ -81,22 +82,14 @@ static void virtual_stream_encoder_reset_hdmi_stream_attribute( struct stream_encoder *enc) {} -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT static void virtual_enc_dp_set_odm_combine( struct stream_encoder *enc, bool odm_combine) {} -#endif -#endif static const struct stream_encoder_funcs virtual_str_enc_funcs = { -#ifdef CONFIG_DRM_AMD_DC_DCN2_0 -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .dp_set_odm_combine = virtual_enc_dp_set_odm_combine, -#endif -#endif .dp_set_stream_attribute = virtual_stream_encoder_dp_set_stream_attribute, .hdmi_set_stream_attribute = diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h new file mode 100644 index 000000000000..b10728f33f62 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -0,0 +1,255 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_H_
+#define _DMUB_CMD_H_
+
+#include "dmub_types.h"
+#include "dmub_cmd_dal.h"
+#include "dmub_cmd_vbios.h"
+#include "atomfirmware.h"
+
+#define DMUB_RB_CMD_SIZE 64
+#define DMUB_RB_MAX_ENTRY 128
+#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
+#define REG_SET_MASK 0xFFFF
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_type {
+	DMUB_CMD__NULL = 0,
+	DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE = 1,
+	DMUB_CMD__REG_SEQ_FIELD_UPDATE_SEQ = 2,
+	DMUB_CMD__REG_SEQ_BURST_WRITE = 3,
+	DMUB_CMD__REG_REG_WAIT = 4,
+	DMUB_CMD__PSR = 64,
+	DMUB_CMD__VBIOS = 128,
+};
+
+#pragma pack(push, 1)
+
+struct dmub_cmd_header {
+	unsigned int type : 8;
+	unsigned int sub_type : 8;
+	unsigned int reserved0 : 8;
+	unsigned int payload_bytes : 6; /* up to 60 bytes */
+	unsigned int reserved1 : 2;
+};
+
+/*
+ * Read modify write
+ *
+ * 60 payload bytes can hold up to 5 sets of read modify writes,
+ * each taking 3 dwords.
+ *
+ * number of sequences = header.payload_bytes / sizeof(struct dmub_cmd_read_modify_write_sequence)
+ *
+ * modify_mask = 0xffff'ffff means all fields are going to be updated. In this
+ * case the command parser will skip the read, so modify_mask = 0xffff'ffff
+ * can be used as a plain register write.
+ */
+struct dmub_cmd_read_modify_write_sequence {
+	uint32_t addr;
+	uint32_t modify_mask;
+	uint32_t modify_value;
+};
+
+#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5
+struct dmub_rb_cmd_read_modify_write {
+	struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE
+	struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX];
+};
+
+/*
+ * Update a register with a specified sequence of masks and values
+ *
+ * 60 payload bytes can hold an address + up to 7 mask/value pairs, each
+ * taking 2 dwords.
+ *
+ * number of field update sequences = (header.payload_bytes - sizeof(addr)) / sizeof(struct dmub_cmd_reg_field_update_sequence)
+ *
+ * USE CASES:
+ * 1. auto-increment registers, where an additional read would advance the
+ *    pointer and produce a wrong result
+ * 2. toggling a bit without a read in the middle
+ */
+
+struct dmub_cmd_reg_field_update_sequence {
+	uint32_t modify_mask; // 0xffff'ffff to skip initial read
+	uint32_t modify_value;
+};
+
+#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7
+
+struct dmub_rb_cmd_reg_field_update_sequence {
+	struct dmub_cmd_header header;
+	uint32_t addr;
+	struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX];
+};
+
+
+/*
+ * Burst write
+ *
+ * Supports use cases such as writing out LUTs.
+ *
+ * 60 payload bytes can hold up to 14 values to write to a given address
+ *
+ * number of values = (header.payload_bytes - sizeof(addr)) / sizeof(uint32_t)
+ */
+#define DMUB_BURST_WRITE_VALUES__MAX 14
+struct dmub_rb_cmd_burst_write {
+	struct dmub_cmd_header header; // type = DMUB_CMD__REG_SEQ_BURST_WRITE
+	uint32_t addr;
+	uint32_t write_values[DMUB_BURST_WRITE_VALUES__MAX];
+};
+
+
+struct dmub_rb_cmd_common {
+	struct dmub_cmd_header header;
+	uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)];
+};
+
+struct dmub_cmd_reg_wait_data {
+	uint32_t addr;
+	uint32_t mask;
+	uint32_t condition_field_value;
+	uint32_t time_out_us;
+};
+
+struct dmub_rb_cmd_reg_wait {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_reg_wait_data reg_wait;
+};
+
+struct dmub_cmd_digx_encoder_control_data {
+	union dig_encoder_control_parameters_v1_5 dig;
+};
+
+struct dmub_rb_cmd_digx_encoder_control {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_digx_encoder_control_data encoder_control;
+};
+
+struct dmub_cmd_set_pixel_clock_data {
+	struct set_pixel_clock_parameter_v1_7 clk;
+};
+
+struct dmub_rb_cmd_set_pixel_clock {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_set_pixel_clock_data pixel_clock;
+};
+
+struct dmub_cmd_enable_disp_power_gating_data {
+	struct enable_disp_power_gating_parameters_v2_1 pwr;
+};
+
+struct dmub_rb_cmd_enable_disp_power_gating {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_enable_disp_power_gating_data power_gating;
+};
+
+struct dmub_cmd_dig1_transmitter_control_data {
+	struct dig_transmitter_control_parameters_v1_6 dig;
+};
+
+struct dmub_rb_cmd_dig1_transmitter_control {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_dig1_transmitter_control_data transmitter_control;
+};
+
+struct dmub_rb_cmd_dpphy_init {
+	struct dmub_cmd_header header;
+	uint8_t reserved[60];
+};
+
+struct dmub_cmd_psr_copy_settings_data {
+	uint32_t reg1;
+	uint32_t reg2;
+	uint32_t reg3;
+};
+
+struct dmub_rb_cmd_psr_copy_settings {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_copy_settings_data psr_copy_settings_data;
+};
+
+struct dmub_cmd_psr_set_level_data {
+	uint16_t psr_level;
+};
+
+struct dmub_rb_cmd_psr_set_level {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_set_level_data psr_set_level_data;
+};
+
+struct dmub_rb_cmd_psr_disable {
+	struct dmub_cmd_header header;
+};
+
+struct dmub_rb_cmd_psr_enable {
+	struct dmub_cmd_header header;
+};
+
+struct dmub_cmd_psr_notify_vblank_data {
+	uint32_t vblank_int; // Which vblank interrupt was triggered
+};
+
+struct dmub_rb_cmd_notify_vblank {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_notify_vblank_data psr_notify_vblank_data;
+};
+
+struct dmub_cmd_psr_notify_static_state_data {
+	uint32_t ss_int;   // Which static screen interrupt was triggered
+	uint32_t ss_enter; // Enter (1) or exit (0) static screen
+};
+
+struct dmub_rb_cmd_psr_notify_static_state {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_psr_notify_static_state_data psr_notify_static_state_data;
+};
+
+union dmub_rb_cmd {
+	struct dmub_rb_cmd_read_modify_write read_modify_write;
+	struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
+	struct dmub_rb_cmd_burst_write burst_write;
+	struct dmub_rb_cmd_reg_wait reg_wait;
+	struct dmub_rb_cmd_common cmd_common;
+	struct dmub_rb_cmd_digx_encoder_control digx_encoder_control;
+	struct dmub_rb_cmd_set_pixel_clock set_pixel_clock;
+	struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
+	struct dmub_rb_cmd_dpphy_init dpphy_init;
+ struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control; + struct dmub_rb_cmd_psr_enable psr_enable; + struct dmub_rb_cmd_psr_disable psr_disable; + struct dmub_rb_cmd_psr_copy_settings psr_copy_settings; + struct dmub_rb_cmd_psr_set_level psr_set_level; +}; + +#pragma pack(pop) + +#endif /* _DMUB_CMD_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h new file mode 100644 index 000000000000..14f13e8a6f3b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_CMD_DAL_H_ +#define _DMUB_CMD_DAL_H_ + +/* + * Command IDs should be treated as stable ABI. + * Do not reuse or modify IDs. + */ + +enum dmub_cmd_psr_type { + DMUB_CMD__PSR_ENABLE = 0, + DMUB_CMD__PSR_DISABLE = 1, + DMUB_CMD__PSR_COPY_SETTINGS = 2, + DMUB_CMD__PSR_SET_LEVEL = 3, +}; + +#endif /* _DMUB_CMD_DAL_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h new file mode 100644 index 000000000000..b6deb8e2590f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h @@ -0,0 +1,41 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
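
As a cross-check of the payload accounting in the dmub_cmd.h comments above, here is a small standalone sketch, using local copies of the packed layouts and hypothetical register offsets, which confirms that a 4-byte header plus five 12-byte read/modify/write sequences exactly fill one 64-byte ring buffer entry:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DMUB_RB_CMD_SIZE 64
#define DMUB_READ_MODIFY_WRITE_SEQ__MAX 5

/* Local copies of the packed layouts above, for illustration only. */
#pragma pack(push, 1)
struct dmub_cmd_header {
	unsigned int type : 8;
	unsigned int sub_type : 8;
	unsigned int reserved0 : 8;
	unsigned int payload_bytes : 6; /* up to 60 bytes */
	unsigned int reserved1 : 2;
};

struct dmub_cmd_read_modify_write_sequence {
	uint32_t addr;
	uint32_t modify_mask;
	uint32_t modify_value;
};

struct dmub_rb_cmd_read_modify_write {
	struct dmub_cmd_header header;
	struct dmub_cmd_read_modify_write_sequence seq[DMUB_READ_MODIFY_WRITE_SEQ__MAX];
};
#pragma pack(pop)

int main(void)
{
	struct dmub_rb_cmd_read_modify_write cmd;
	unsigned int num_seqs = 2;

	/* 4-byte header + 5 * 12-byte sequences == one 64-byte entry */
	assert(sizeof(struct dmub_cmd_header) == 4);
	assert(sizeof(cmd) == DMUB_RB_CMD_SIZE);

	memset(&cmd, 0, sizeof(cmd));
	cmd.header.type = 1; /* DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE */
	cmd.header.payload_bytes =
		num_seqs * sizeof(struct dmub_cmd_read_modify_write_sequence);

	/* modify_mask = 0xffffffff: the parser skips the read, plain write */
	cmd.seq[0].addr = 0x1000; /* hypothetical register offset */
	cmd.seq[0].modify_mask = 0xffffffff;
	cmd.seq[0].modify_value = 0x1;

	/* partial-field update: read, modify the low nibble, write back */
	cmd.seq[1].addr = 0x1004; /* hypothetical register offset */
	cmd.seq[1].modify_mask = 0x0000000f;
	cmd.seq[1].modify_value = 0x7;

	printf("payload_bytes = %u\n", (unsigned)cmd.header.payload_bytes);
	return 0;
}

Since payload_bytes is a 6-bit field and the entry size is fixed at 64 bytes, 60 bytes is the ceiling for a single command's payload; anything larger must be split across multiple commands.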
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_CMD_VBIOS_H_
+#define _DMUB_CMD_VBIOS_H_
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_vbios_type {
+	DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
+	DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
+	DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
+	DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+};
+
+#endif /* _DMUB_CMD_VBIOS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h
new file mode 100644
index 000000000000..c87b1ba7590e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_FW_STATE_H_
+#define _DMUB_FW_STATE_H_
+
+#include "dmub_types.h"
+
+#pragma pack(push, 1)
+
+struct dmub_fw_state {
+	/**
+	 * @phy_initialized_during_fw_boot:
+	 *
+	 * Detects whether VBIOS/VBL ran before firmware boot.
+	 * A value of 1 will usually mean S0i3 boot.
+	 */
+	uint8_t phy_initialized_during_fw_boot;
+
+	/**
+	 * @initialized_phy:
+	 *
+	 * Bit vector of initialized PHYs.
+	 */
+	uint8_t initialized_phy;
+
+	/**
+	 * @enabled_phy:
+	 *
+	 * Bit vector of enabled PHYs for DP alt mode switch tracking.
+	 */
+	uint8_t enabled_phy;
+
+	/**
+	 * @dmcu_fw_loaded:
+	 *
+	 * DMCU auto load state.
+	 */
+	uint8_t dmcu_fw_loaded;
+
+	/**
+	 * @psr_state:
+	 *
+	 * PSR state tracking.
+	 */
+	uint8_t psr_state;
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_FW_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
new file mode 100644
index 000000000000..ac22744eaa94
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_RB_H_ +#define _DMUB_RB_H_ + +#include "dmub_types.h" +#include "dmub_cmd.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +struct dmub_cmd_header; + +struct dmub_rb_init_params { + void *ctx; + void *base_address; + uint32_t capacity; +}; + +struct dmub_rb { + void *base_address; + uint32_t data_count; + uint32_t rptr; + uint32_t wrpt; + uint32_t capacity; + + void *ctx; + void *dmub; +}; + + +static inline bool dmub_rb_empty(struct dmub_rb *rb) +{ + return (rb->wrpt == rb->rptr); +} + +static inline bool dmub_rb_full(struct dmub_rb *rb) +{ + uint32_t data_count; + + if (rb->wrpt >= rb->rptr) + data_count = rb->wrpt - rb->rptr; + else + data_count = rb->capacity - (rb->rptr - rb->wrpt); + + return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE)); +} + +static inline bool dmub_rb_push_front(struct dmub_rb *rb, + const struct dmub_cmd_header *cmd) +{ + uint8_t *wt_ptr = (uint8_t *)(rb->base_address) + rb->wrpt; + + if (dmub_rb_full(rb)) + return false; + + dmub_memcpy(wt_ptr, cmd, DMUB_RB_CMD_SIZE); + rb->wrpt += DMUB_RB_CMD_SIZE; + + if (rb->wrpt >= rb->capacity) + rb->wrpt %= rb->capacity; + + return true; +} + +static inline bool dmub_rb_front(struct dmub_rb *rb, + struct dmub_cmd_header *cmd) +{ + uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr; + + if (dmub_rb_empty(rb)) + return false; + + dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE); + + return true; +} + +static inline bool dmub_rb_pop_front(struct dmub_rb *rb) +{ + if (dmub_rb_empty(rb)) + return false; + + rb->rptr += DMUB_RB_CMD_SIZE; + + if (rb->rptr >= rb->capacity) + rb->rptr %= rb->capacity; + + return true; +} + +static inline void dmub_rb_init(struct dmub_rb *rb, + struct dmub_rb_init_params *init_params) +{ + rb->base_address = init_params->base_address; + rb->capacity = init_params->capacity; + rb->rptr = 0; + rb->wrpt = 0; +} + +#if defined(__cplusplus) +} +#endif + +#endif /* _DMUB_RB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h new file mode 100644 index 000000000000..528243e35add --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -0,0 +1,522 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
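
dmub_rb_full() in dmub_rb.h above deliberately reports full one entry early: wrpt == rptr is reserved to mean empty, so a buffer that wrapped all the way around would otherwise be indistinguishable from an empty one. A standalone sketch of the same arithmetic, reduced to bare offsets with no backing storage, illustrates the effect:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_SIZE 64u

/* Minimal stand-in for struct dmub_rb: pointers only, no storage. */
struct rb {
	uint32_t rptr;
	uint32_t wrpt;
	uint32_t capacity;
};

static int rb_empty(const struct rb *rb)
{
	return rb->wrpt == rb->rptr;
}

static int rb_full(const struct rb *rb)
{
	uint32_t used = (rb->wrpt >= rb->rptr)
		? rb->wrpt - rb->rptr
		: rb->capacity - (rb->rptr - rb->wrpt);

	/* Full one entry early, so full and empty stay distinguishable. */
	return used == rb->capacity - CMD_SIZE;
}

static int rb_push(struct rb *rb)
{
	if (rb_full(rb))
		return 0;
	rb->wrpt = (rb->wrpt + CMD_SIZE) % rb->capacity;
	return 1;
}

int main(void)
{
	struct rb rb = { .rptr = 0, .wrpt = 0, .capacity = 8 * CMD_SIZE };
	int pushed = 0;

	while (rb_push(&rb))
		pushed++;

	/* An 8-entry buffer accepts only 7 commands before reporting full. */
	printf("pushed %d of %u entries\n", pushed, rb.capacity / CMD_SIZE);
	assert(pushed == 7);
	assert(!rb_empty(&rb));
	return 0;
}

With DMUB_RB_MAX_ENTRY = 128 from dmub_cmd.h, the inbox therefore holds at most 127 queued commands at a time.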
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_SRV_H_ +#define _DMUB_SRV_H_ + +/** + * DOC: DMUB interface and operation + * + * DMUB is the interface to the display DMCUB microcontroller on DCN hardware. + * It delegates hardware initialization and command submission to the + * microcontroller. DMUB is the shortname for DMCUB. + * + * This interface is not thread-safe. Ensure that all access to the interface + * is properly synchronized by the caller. + * + * Initialization and usage of the DMUB service should be done in the + * steps given below: + * + * 1. dmub_srv_create() + * 2. dmub_srv_has_hw_support() + * 3. dmub_srv_calc_region_info() + * 4. dmub_srv_hw_init() + * + * The call to dmub_srv_create() is required to use the server. + * + * The calls to dmub_srv_has_hw_support() and dmub_srv_calc_region_info() + * are helpers to query cache window size and allocate framebuffer(s) + * for the cache windows. + * + * The call to dmub_srv_hw_init() programs the DMCUB registers to prepare + * for command submission. Commands can be queued via dmub_srv_cmd_queue() + * and executed via dmub_srv_cmd_execute(). + * + * If the queue is full the dmub_srv_wait_for_idle() call can be used to + * wait until the queue has been cleared. + * + * Destroying the DMUB service can be done by calling dmub_srv_destroy(). + * This does not clear DMUB hardware state, only software state. + * + * The interface is intended to be standalone and should not depend on any + * other component within DAL. 
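
The numbered bring-up steps above translate into a short driver-side sequence. The sketch below is illustrative only: the authoritative prototypes are declared later in dmub_srv.h, so the signatures here are restated from the service's conventions as an assumption, and the timeout value and error-handling policy are likewise assumptions.

#include <stdbool.h>
#include <stdint.h>

/* Status codes as defined by this header, repeated so the sketch
 * compiles as its own translation unit. */
enum dmub_status {
	DMUB_STATUS_OK = 0,
	DMUB_STATUS_NO_CTX,
	DMUB_STATUS_QUEUE_FULL,
	DMUB_STATUS_TIMEOUT,
	DMUB_STATUS_INVALID,
};

struct dmub_srv;
struct dmub_srv_create_params;
struct dmub_srv_region_params;
struct dmub_srv_region_info;
struct dmub_srv_hw_params;
struct dmub_cmd_header;

/* Assumed prototypes, following the enum dmub_status convention above. */
enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
				 const struct dmub_srv_create_params *params);
enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported);
enum dmub_status dmub_srv_calc_region_info(struct dmub_srv *dmub,
					   const struct dmub_srv_region_params *params,
					   struct dmub_srv_region_info *out);
enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params);
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
				    const struct dmub_cmd_header *cmd);
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
					uint32_t timeout_us);
void dmub_srv_destroy(struct dmub_srv *dmub);

enum dmub_status example_dmub_bring_up(struct dmub_srv *dmub,
		const struct dmub_srv_create_params *create_params,
		const struct dmub_srv_region_params *region_params,
		const struct dmub_srv_hw_params *hw_params,
		struct dmub_srv_region_info *region_info,
		const struct dmub_cmd_header *first_cmd)
{
	enum dmub_status status;
	bool has_hw = false;

	status = dmub_srv_create(dmub, create_params);        /* step 1 */
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_has_hw_support(dmub, &has_hw);      /* step 2 */
	if (status != DMUB_STATUS_OK || !has_hw)
		goto fail;

	/* Step 3: size the cache windows; the driver then allocates
	 * framebuffer memory for them (allocation not shown here). */
	status = dmub_srv_calc_region_info(dmub, region_params, region_info);
	if (status != DMUB_STATUS_OK)
		goto fail;

	status = dmub_srv_hw_init(dmub, hw_params);           /* step 4 */
	if (status != DMUB_STATUS_OK)
		goto fail;

	/* Queue a command; if the ring buffer is full, drain it first,
	 * as the comment above suggests, then retry. */
	status = dmub_srv_cmd_queue(dmub, first_cmd);
	if (status == DMUB_STATUS_QUEUE_FULL) {
		status = dmub_srv_wait_for_idle(dmub, 100000 /* us, illustrative */);
		if (status == DMUB_STATUS_OK)
			status = dmub_srv_cmd_queue(dmub, first_cmd);
	}
	if (status != DMUB_STATUS_OK)
		goto fail;

	return dmub_srv_cmd_execute(dmub);

fail:
	dmub_srv_destroy(dmub); /* clears software state only, not hardware */
	return status;
}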
+ */
+
+#include "dmub_types.h"
+#include "dmub_cmd.h"
+#include "dmub_rb.h"
+#include "dmub_fw_state.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Forward declarations */
+struct dmub_srv;
+struct dmub_cmd_header;
+struct dmcu;
+
+/* enum dmub_status - return code for dmcub functions */
+enum dmub_status {
+	DMUB_STATUS_OK = 0,
+	DMUB_STATUS_NO_CTX,
+	DMUB_STATUS_QUEUE_FULL,
+	DMUB_STATUS_TIMEOUT,
+	DMUB_STATUS_INVALID,
+};
+
+/* enum dmub_asic - dmub asic identifier */
+enum dmub_asic {
+	DMUB_ASIC_NONE = 0,
+	DMUB_ASIC_DCN20,
+	DMUB_ASIC_DCN21,
+	DMUB_ASIC_MAX,
+};
+
+/* enum dmub_window_id - dmub window identifier */
+enum dmub_window_id {
+	DMUB_WINDOW_0_INST_CONST = 0,
+	DMUB_WINDOW_1_STACK,
+	DMUB_WINDOW_2_BSS_DATA,
+	DMUB_WINDOW_3_VBIOS,
+	DMUB_WINDOW_4_MAILBOX,
+	DMUB_WINDOW_5_TRACEBUFF,
+	DMUB_WINDOW_6_FW_STATE,
+	DMUB_WINDOW_7_RESERVED,
+	DMUB_WINDOW_TOTAL,
+};
+
+/**
+ * struct dmub_region - dmub hw memory region
+ * @base: base address for region, must be 256 byte aligned
+ * @top: top address for region
+ */
+struct dmub_region {
+	uint32_t base;
+	uint32_t top;
+};
+
+/**
+ * struct dmub_window - dmub hw cache window
+ * @offset: offset to the fb memory in gpu address space
+ * @region: region in uc address space for cache window
+ */
+struct dmub_window {
+	union dmub_addr offset;
+	struct dmub_region region;
+};
+
+/**
+ * struct dmub_fb - defines a dmub framebuffer memory region
+ * @cpu_addr: cpu virtual address for the region, NULL if invalid
+ * @gpu_addr: gpu virtual address for the region, zero if invalid
+ * @size: size of the region in bytes, zero if invalid
+ */
+struct dmub_fb {
+	void *cpu_addr;
+	uint64_t gpu_addr;
+	uint32_t size;
+};
+
+/**
+ * struct dmub_srv_region_params - params used for calculating dmub regions
+ * @inst_const_size: size of the fw inst const section
+ * @bss_data_size: size of the fw bss data section
+ * @vbios_size: size of the vbios data
+ */
+struct dmub_srv_region_params {
+	uint32_t inst_const_size;
+	uint32_t bss_data_size;
+	uint32_t vbios_size;
+};
+
+/**
+ * struct dmub_srv_region_info - output region info from the dmub service
+ * @fb_size: required minimum fb size for all regions, aligned to 4096 bytes
+ * @num_regions: number of regions used by the dmub service
+ * @regions: region info
+ *
+ * The regions are aligned such that they can be all placed within the
+ * same framebuffer but they can also be placed into different framebuffers.
+ *
+ * The size of each region can be calculated by the caller:
+ * size = reg.top - reg.base
+ *
+ * Care must be taken when performing custom allocations to ensure that each
+ * region base address is 256 byte aligned.
+ */
+struct dmub_srv_region_info {
+	uint32_t fb_size;
+	uint8_t num_regions;
+	struct dmub_region regions[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_fb_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+ * @cpu_addr: base cpu address for the framebuffer
+ * @gpu_addr: base gpu virtual address for the framebuffer
+ */
+struct dmub_srv_fb_params {
+	const struct dmub_srv_region_info *region_info;
+	void *cpu_addr;
+	uint64_t gpu_addr;
+};
+
+/**
+ * struct dmub_srv_fb_info - output fb info from the dmub service
+ * @num_fb: number of required dmub framebuffers
+ * @fb: fb data for each region
+ *
+ * Output from the dmub service helper that can be used by the
+ * driver to prepare dmub_fb that can be passed into the dmub
+ * hw init service.
+ *
+ * Assumes that all regions are within the same framebuffer
+ * and have been setup according to the region_info generated
+ * by the dmub service.
+ */
+struct dmub_srv_fb_info {
+	uint8_t num_fb;
+	struct dmub_fb fb[DMUB_WINDOW_TOTAL];
+};
+
+/**
+ * struct dmub_srv_base_funcs - Driver specific base callbacks
+ */
+struct dmub_srv_base_funcs {
+	/**
+	 * @reg_read:
+	 *
+	 * Hook for reading a register.
+	 *
+	 * Return: The 32-bit register value from the given address.
+	 */
+	uint32_t (*reg_read)(void *ctx, uint32_t address);
+
+	/**
+	 * @reg_write:
+	 *
+	 * Hook for writing a value to the register specified by address.
+	 */
+	void (*reg_write)(void *ctx, uint32_t address, uint32_t value);
+};
+
+/**
+ * struct dmub_srv_hw_funcs - hardware sequencer funcs for dmub
+ */
+struct dmub_srv_hw_funcs {
+	/* private: internal use only */
+
+	void (*reset)(struct dmub_srv *dmub);
+
+	void (*reset_release)(struct dmub_srv *dmub);
+
+	void (*backdoor_load)(struct dmub_srv *dmub,
+			      const struct dmub_window *cw0,
+			      const struct dmub_window *cw1);
+
+	void (*setup_windows)(struct dmub_srv *dmub,
+			      const struct dmub_window *cw2,
+			      const struct dmub_window *cw3,
+			      const struct dmub_window *cw4,
+			      const struct dmub_window *cw5,
+			      const struct dmub_window *cw6);
+
+	void (*setup_mailbox)(struct dmub_srv *dmub,
+			      const struct dmub_region *inbox1);
+
+	uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+
+	void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+
+	bool (*is_supported)(struct dmub_srv *dmub);
+
+	bool (*is_hw_init)(struct dmub_srv *dmub);
+
+	bool (*is_phy_init)(struct dmub_srv *dmub);
+
+	bool (*is_auto_load_done)(struct dmub_srv *dmub);
+};
+
+/**
+ * struct dmub_srv_create_params - params for dmub service creation
+ * @funcs: driver supplied base routines
+ * @hw_funcs: optional overrides for hw funcs
+ * @user_ctx: context data for callback funcs
+ * @asic: driver supplied asic
+ * @is_virtual: true to create a virtual service without hardware backing
+ */
+struct dmub_srv_create_params {
+	struct dmub_srv_base_funcs funcs;
+	struct dmub_srv_hw_funcs *hw_funcs;
+	void *user_ctx;
+	enum dmub_asic asic;
+	bool is_virtual;
+};
+
+/**
+ * struct dmub_srv_hw_params - params for dmub hardware initialization
+ * @fb: framebuffer info for each region
+ * @fb_base: base of the framebuffer aperture
+ * @fb_offset: offset of the framebuffer aperture
+ * @psp_version: psp version to pass for DMCU init
+ * @load_inst_const: true if DMUB should load inst const fw
+ */
+struct dmub_srv_hw_params {
+	struct dmub_fb *fb[DMUB_WINDOW_TOTAL];
+	uint64_t fb_base;
+	uint64_t fb_offset;
+	uint32_t psp_version;
+	bool load_inst_const;
+};
+
+/**
+ * struct dmub_srv - software state for dmcub
+ * @asic: dmub asic identifier
+ * @user_ctx: user provided context for the dmub_srv
+ * @is_virtual: true if the service runs without hardware backing
+ * @fw_state: dmub firmware state pointer
+ */
+struct dmub_srv {
+	enum dmub_asic asic;
+	void *user_ctx;
+	bool is_virtual;
+	volatile const struct dmub_fw_state *fw_state;
+
+	/* private: internal use only */
+	struct dmub_srv_base_funcs funcs;
+	struct dmub_srv_hw_funcs hw_funcs;
+	struct dmub_rb inbox1_rb;
+
+	bool sw_init;
+	bool hw_init;
+
+	uint64_t fb_base;
+	uint64_t fb_offset;
+	uint32_t psp_version;
+};
+
+/**
+ * dmub_srv_create() - creates the DMUB service.
+ * @dmub: the dmub service
+ * @params: creation parameters for the service
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
+				 const struct dmub_srv_create_params *params);
+
+/**
+ * dmub_srv_destroy() - destroys the DMUB service.
+ * @dmub: the dmub service
+ */
+void dmub_srv_destroy(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_calc_region_info() - retrieves region info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate region locations
+ * @out: the output region info from dmub
+ *
+ * Calculates the base and top address for all relevant dmub regions
+ * using the parameters given (if any).
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_calc_region_info(struct dmub_srv *dmub,
+			  const struct dmub_srv_region_params *params,
+			  struct dmub_srv_region_info *out);
+
+/**
+ * dmub_srv_calc_fb_info() - retrieves fb info from the dmub service
+ * @dmub: the dmub service
+ * @params: parameters used to calculate fb locations
+ * @out: the output fb info from dmub
+ *
+ * Calculates the cpu and gpu addresses and the size for each dmub
+ * framebuffer region using the region info and framebuffer base
+ * addresses given.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+				       const struct dmub_srv_fb_params *params,
+				       struct dmub_srv_fb_info *out);
+
+/**
+ * dmub_srv_has_hw_support() - returns hw support state for dmcub
+ * @dmub: the dmub service
+ * @is_supported: hw support state
+ *
+ * Queries the hardware for DMCUB support and returns the result.
+ *
+ * Can be called before dmub_srv_hw_init().
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
+					 bool *is_supported);
+
+/**
+ * dmub_srv_is_hw_init() - returns hardware init state
+ * @dmub: the dmub service
+ * @is_hw_init: hardware init state
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init);
+
+/**
+ * dmub_srv_hw_init() - initializes the underlying DMUB hardware
+ * @dmub: the dmub service
+ * @params: params for hardware initialization
+ *
+ * Resets the DMUB hardware and performs backdoor loading of the
+ * required cache regions based on the input framebuffer regions.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_NO_CTX - dmcub context not initialized
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+				  const struct dmub_srv_hw_params *params);
+
+/**
+ * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * @dmub: the dmub service
+ * @cmd: the command to queue
+ *
+ * Queues a command to the DMUB service but does not begin execution
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_QUEUE_FULL - no remaining room in queue
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+				    const struct dmub_cmd_header *cmd);
+
+/**
+ * dmub_srv_cmd_execute() - executes a queued command sequence on the dmub
+ * @dmub: the dmub service
+ *
+ * Begins execution of queued commands on the dmub.
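+ *
+ * In this implementation that amounts to publishing the ring buffer write
+ * pointer to the DMCUB inbox1 doorbell, so multiple dmub_srv_cmd_queue()
+ * calls can be batched under a single execute.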
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_cmd_submit() - submits a command to the DMUB immediately
+ * @dmub: the dmub service
+ * @cmd: the command to submit
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Submits a command to the DMUB with an optional timeout.
+ * If timeout_us is given then the service will retry the
+ * submission for up to the given number of microseconds.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for submit timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub,
+				     const struct dmub_cmd_header *cmd,
+				     uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_auto_load() - Waits for firmware auto load to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until firmware has been autoloaded by the DMCUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without firmware autoload support this function will return
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for auto load timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
+					     uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_phy_init() - Waits for DMUB PHY init to complete
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the PHY has been initialized by the DMUB. The maximum
+ * wait time is given in microseconds to prevent spinning forever.
+ *
+ * On ASICs without PHY init support this function will return
+ * immediately.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for phy init timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
+					    uint32_t timeout_us);
+
+/**
+ * dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the DMUB buffer is empty and all commands have
+ * finished processing. The maximum wait time is given in
+ * microseconds to prevent spinning forever.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
+					uint32_t timeout_us);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
new file mode 100644
index 000000000000..6b3ee42db350
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef _DMUB_TRACE_BUFFER_H_ +#define _DMUB_TRACE_BUFFER_H_ + +#include "dmub_types.h" + +#define LOAD_DMCU_FW 1 +#define LOAD_PHY_FW 2 + + +enum dmucb_trace_code { + DMCUB__UNKNOWN, + DMCUB__MAIN_BEGIN, + DMCUB__PHY_INIT_BEGIN, + DMCUB__PHY_FW_SRAM_LOAD_BEGIN, + DMCUB__PHY_FW_SRAM_LOAD_END, + DMCUB__PHY_INIT_POLL_DONE, + DMCUB__PHY_INIT_END, + DMCUB__DMCU_ERAM_LOAD_BEGIN, + DMCUB__DMCU_ERAM_LOAD_END, + DMCUB__DMCU_ISR_LOAD_BEGIN, + DMCUB__DMCU_ISR_LOAD_END, + DMCUB__MAIN_IDLE, + DMCUB__PERF_TRACE, + DMCUB__PG_DONE, +}; + +struct dmcub_trace_buf_entry { + enum dmucb_trace_code trace_code; + uint32_t tick_count; + uint32_t param0; + uint32_t param1; +}; + +#define TRACE_BUF_SIZE (1024) //1 kB +#define PERF_TRACE_MAX_ENTRY ((TRACE_BUF_SIZE - 8)/sizeof(struct dmcub_trace_buf_entry)) + + +struct dmcub_trace_buf { + uint32_t entry_count; + uint32_t clk_freq; + struct dmcub_trace_buf_entry entries[PERF_TRACE_MAX_ENTRY]; +}; + + +#endif /* _DMUB_TRACE_BUFFER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h new file mode 100644 index 000000000000..41d524b0db2f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h @@ -0,0 +1,64 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DMUB_TYPES_H_ +#define _DMUB_TYPES_H_ + +/* Basic type definitions. */ +#include <asm/byteorder.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <stdarg.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +#ifndef dmub_memcpy +#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes)) +#endif + +#ifndef dmub_memset +#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes)) +#endif + +#ifndef dmub_udelay +#define dmub_udelay(microseconds) udelay(microseconds) +#endif + +union dmub_addr { + struct { + uint32_t low_part; + uint32_t high_part; + } u; + uint64_t quad_part; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _DMUB_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile new file mode 100644 index 000000000000..e08dfeea24b0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile @@ -0,0 +1,27 @@ +# +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# + +DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o + +AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DMUB) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c new file mode 100644 index 000000000000..951ea7053c7e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -0,0 +1,152 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../inc/dmub_srv.h"
+#include "dmub_reg.h"
+
+#include "dcn/dcn_2_0_0_offset.h"
+#include "dcn/dcn_2_0_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
+#define CTX dmub
+
+void dmub_dcn20_reset(struct dmub_srv *dmub)
+{
+	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
+	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+}
+
+void dmub_dcn20_reset_release(struct dmub_srv *dmub)
+{
+	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
+	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0);
+}
+
+void dmub_dcn20_backdoor_load(struct dmub_srv *dmub,
+			      const struct dmub_window *cw0,
+			      const struct dmub_window *cw1)
+{
+	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+	REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x4,
+		     DMCUB_MEM_WRITE_SPACE, 0x4);
+
+	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, cw0->offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, cw0->offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
+	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
+		  DMCUB_REGION3_CW0_ENABLE, 1);
+
+	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, cw1->offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, cw1->offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
+	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+		  DMCUB_REGION3_CW1_ENABLE, 1);
+
+	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+		     0x20);
+}
+
+void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+			      const struct dmub_window *cw2,
+			      const struct dmub_window *cw3,
+			      const struct dmub_window *cw4,
+			      const struct dmub_window *cw5,
+			      const struct dmub_window *cw6)
+{
+	REG_WRITE(DMCUB_REGION3_CW2_OFFSET, cw2->offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, cw2->offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+	REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+		  DMCUB_REGION3_CW2_ENABLE, 1);
+
+	REG_WRITE(DMCUB_REGION3_CW3_OFFSET, cw3->offset.u.low_part);
+	REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, cw3->offset.u.high_part);
+	REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
+	REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
+		  DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
+		  DMCUB_REGION3_CW3_ENABLE, 1);
+
+	/* TODO: Move this to CW4.
*/ + + REG_WRITE(DMCUB_REGION4_OFFSET, cw4->offset.u.low_part); + REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, cw4->offset.u.high_part); + REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, + cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, + 1); + + REG_WRITE(DMCUB_REGION3_CW5_OFFSET, cw5->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, cw5->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); + REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, + DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, + DMCUB_REGION3_CW5_ENABLE, 1); + + REG_WRITE(DMCUB_REGION3_CW6_OFFSET, cw6->offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, cw6->offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); + REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, + DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, + DMCUB_REGION3_CW6_ENABLE, 1); +} + +void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, + const struct dmub_region *inbox1) +{ + /* TODO: Use CW4 instead of region 4. */ + + REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); + REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); + REG_WRITE(DMCUB_INBOX1_WPTR, 0); +} + +uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_INBOX1_RPTR); +} + +void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset) +{ + REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset); +} + +bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0; +} + +bool dmub_dcn20_is_supported(struct dmub_srv *dmub) +{ + uint32_t supported = 0; + + REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported); + + return supported; +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h new file mode 100644 index 000000000000..e70a57573467 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -0,0 +1,63 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN20_H_ +#define _DMUB_DCN20_H_ + +#include "../inc/dmub_types.h" + +struct dmub_srv; + +/* Hardware functions. 
*/ + +void dmub_dcn20_init(struct dmub_srv *dmub); + +void dmub_dcn20_reset(struct dmub_srv *dmub); + +void dmub_dcn20_reset_release(struct dmub_srv *dmub); + +void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1); + +void dmub_dcn20_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5, + const struct dmub_window *cw6); + +void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, + const struct dmub_region *inbox1); + +uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub); + +void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset); + +bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub); + +bool dmub_dcn20_is_supported(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN20_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c new file mode 100644 index 000000000000..9cea7a2d8dbf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c @@ -0,0 +1,141 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_reg.h" + +#include "dcn/dcn_2_1_0_offset.h" +#include "dcn/dcn_2_1_0_sh_mask.h" +#include "renoir_ip_offset.h" + +#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg +#define CTX dmub + +static inline void dmub_dcn21_translate_addr(const union dmub_addr *addr_in, + uint64_t fb_base, + uint64_t fb_offset, + union dmub_addr *addr_out) +{ + addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; +} + +void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1) +{ + union dmub_addr offset; + uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + + REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, + DMCUB_MEM_WRITE_SPACE, 0x3); + + dmub_dcn21_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); + REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, + DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, + DMCUB_REGION3_CW0_ENABLE, 1); + + dmub_dcn21_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); + REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, + DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, + DMCUB_REGION3_CW1_ENABLE, 1); + + REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, + 0x20); +} + +void dmub_dcn21_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5, + const struct dmub_window *cw6) +{ + union dmub_addr offset; + uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + + dmub_dcn21_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); + REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, + DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, + DMCUB_REGION3_CW2_ENABLE, 1); + + dmub_dcn21_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); + REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, + DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, + DMCUB_REGION3_CW3_ENABLE, 1); + + /* TODO: Move this to CW4. 
*/ + dmub_dcn21_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); + REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, + cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, + 1); + + dmub_dcn21_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); + REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, + DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, + DMCUB_REGION3_CW5_ENABLE, 1); + + dmub_dcn21_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); + + REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); + REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); + REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); + REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, + DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, + DMCUB_REGION3_CW6_ENABLE, 1); +} + +bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) +{ + return (REG_READ(DMCUB_SCRATCH0) == 3); +} + +bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub) +{ + return REG_READ(DMCUB_SCRATCH10) == 0; +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h new file mode 100644 index 000000000000..f7a93a5dcfa5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_DCN21_H_ +#define _DMUB_DCN21_H_ + +#include "dmub_dcn20.h" + +/* Hardware functions. 
*/ + +void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, + const struct dmub_window *cw0, + const struct dmub_window *cw1); + +void dmub_dcn21_setup_windows(struct dmub_srv *dmub, + const struct dmub_window *cw2, + const struct dmub_window *cw3, + const struct dmub_window *cw4, + const struct dmub_window *cw5, + const struct dmub_window *cw6); + +bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); + +bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub); + +#endif /* _DMUB_DCN21_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c new file mode 100644 index 000000000000..4094eca212f0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c @@ -0,0 +1,109 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dmub_reg.h" +#include "../inc/dmub_srv.h" + +struct dmub_reg_value_masks { + uint32_t value; + uint32_t mask; +}; + +static inline void +set_reg_field_value_masks(struct dmub_reg_value_masks *field_value_mask, + uint32_t value, uint32_t mask, uint8_t shift) +{ + field_value_mask->value = + (field_value_mask->value & ~mask) | (mask & (value << shift)); + field_value_mask->mask = field_value_mask->mask | mask; +} + +static void set_reg_field_values(struct dmub_reg_value_masks *field_value_mask, + uint32_t addr, int n, uint8_t shift1, + uint32_t mask1, uint32_t field_value1, + va_list ap) +{ + uint32_t shift, mask, field_value; + int i = 1; + + /* gather all bits value/mask getting updated in this register */ + set_reg_field_value_masks(field_value_mask, field_value1, mask1, + shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t); + + set_reg_field_value_masks(field_value_mask, field_value, mask, + shift); + i++; + } +} + +static inline uint32_t get_reg_field_value_ex(uint32_t reg_value, uint32_t mask, + uint8_t shift) +{ + return (mask & reg_value) >> shift; +} + +void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, + uint32_t mask1, uint32_t field_value1, ...) 
+{ + struct dmub_reg_value_masks field_value_mask = { 0 }; + uint32_t reg_val; + va_list ap; + + va_start(ap, field_value1); + set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, + field_value1, ap); + va_end(ap); + + reg_val = srv->funcs.reg_read(srv->user_ctx, addr); + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + srv->funcs.reg_write(srv->user_ctx, addr, reg_val); +} + +void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) +{ + struct dmub_reg_value_masks field_value_mask = { 0 }; + va_list ap; + + va_start(ap, field_value1); + set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, + field_value1, ap); + va_end(ap); + + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + srv->funcs.reg_write(srv->user_ctx, addr, reg_val); +} + +void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, + uint32_t mask, uint32_t *field_value) +{ + uint32_t reg_val = srv->funcs.reg_read(srv->user_ctx, addr); + *field_value = get_reg_field_value_ex(reg_val, mask, shift); +} diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h new file mode 100644 index 000000000000..bac4ee8f745f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h @@ -0,0 +1,120 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DMUB_REG_H_ +#define _DMUB_REG_H_ + +#include "../inc/dmub_types.h" + +struct dmub_srv; + +/* Register offset and field lookup. */ + +#define BASE(seg) BASE_INNER(seg) + +#define REG_OFFSET(base_index, addr) (BASE(base_index) + addr) + +#define REG(reg_name) REG_OFFSET(mm ## reg_name ## _BASE_IDX, mm ## reg_name) + +#define FD(reg_field) reg_field ## __SHIFT, reg_field ## _MASK + +#define FN(reg_name, field) FD(reg_name##__##field) + +/* Register reads and writes. */ + +#define REG_READ(reg) ((CTX)->funcs.reg_read((CTX)->user_ctx, REG(reg))) + +#define REG_WRITE(reg, val) \ + ((CTX)->funcs.reg_write((CTX)->user_ctx, REG(reg), (val))) + +/* Register field setting. */ + +#define REG_SET_N(reg_name, n, initial_val, ...) 
\ + dmub_reg_set(CTX, REG(reg_name), initial_val, n, __VA_ARGS__) + +#define REG_SET(reg_name, initial_val, field, val) \ + REG_SET_N(reg_name, 1, initial_val, \ + FN(reg_name, field), val) + +#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \ + REG_SET_N(reg, 2, init_value, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2) + +#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \ + REG_SET_N(reg, 3, init_value, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3) + +#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \ + REG_SET_N(reg, 4, init_value, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3, \ + FN(reg, f4), v4) + +/* Register field updating. */ + +#define REG_UPDATE_N(reg_name, n, ...)\ + dmub_reg_update(CTX, REG(reg_name), n, __VA_ARGS__) + +#define REG_UPDATE(reg_name, field, val) \ + REG_UPDATE_N(reg_name, 1, \ + FN(reg_name, field), val) + +#define REG_UPDATE_2(reg, f1, v1, f2, v2) \ + REG_UPDATE_N(reg, 2,\ + FN(reg, f1), v1,\ + FN(reg, f2), v2) + +#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \ + REG_UPDATE_N(reg, 3, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3) + +#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \ + REG_UPDATE_N(reg, 4, \ + FN(reg, f1), v1, \ + FN(reg, f2), v2, \ + FN(reg, f3), v3, \ + FN(reg, f4), v4) + +/* Register field getting. */ + +#define REG_GET(reg_name, field, val) \ + dmub_reg_get(CTX, REG(reg_name), FN(reg_name, field), val) + +void dmub_reg_set(struct dmub_srv *srv, uint32_t addr, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); + +void dmub_reg_update(struct dmub_srv *srv, uint32_t addr, int n, uint8_t shift1, + uint32_t mask1, uint32_t field_value1, ...); + +void dmub_reg_get(struct dmub_srv *srv, uint32_t addr, uint8_t shift, + uint32_t mask, uint32_t *field_value); + +#endif /* _DMUB_REG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c new file mode 100644 index 000000000000..5f39166d3c08 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -0,0 +1,453 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../inc/dmub_srv.h" +#include "dmub_dcn20.h" +#include "dmub_dcn21.h" +#include "dmub_trace_buffer.h" +#include "os_types.h" +/* + * Note: the DMUB service is standalone. 
No additional headers should be + * added below or above this line unless they reside within the DMUB + * folder. + */ + +/* Alignment for framebuffer memory. */ +#define DMUB_FB_ALIGNMENT (1024 * 1024) + +/* Stack size. */ +#define DMUB_STACK_SIZE (128 * 1024) + +/* Context size. */ +#define DMUB_CONTEXT_SIZE (512 * 1024) + +/* Mailbox size */ +#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) + + +/* Number of windows in use. */ +#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) +/* Base addresses. */ + +#define DMUB_CW0_BASE (0x60000000) +#define DMUB_CW1_BASE (0x61000000) +#define DMUB_CW3_BASE (0x63000000) +#define DMUB_CW5_BASE (0x65000000) +#define DMUB_CW6_BASE (0x66000000) + +static inline uint32_t dmub_align(uint32_t val, uint32_t factor) +{ + return (val + factor - 1) / factor * factor; +} + +static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) +{ + struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; + + switch (asic) { + case DMUB_ASIC_DCN20: + case DMUB_ASIC_DCN21: + funcs->reset = dmub_dcn20_reset; + funcs->reset_release = dmub_dcn20_reset_release; + funcs->backdoor_load = dmub_dcn20_backdoor_load; + funcs->setup_windows = dmub_dcn20_setup_windows; + funcs->setup_mailbox = dmub_dcn20_setup_mailbox; + funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr; + funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr; + funcs->is_supported = dmub_dcn20_is_supported; + funcs->is_hw_init = dmub_dcn20_is_hw_init; + + if (asic == DMUB_ASIC_DCN21) { + funcs->backdoor_load = dmub_dcn21_backdoor_load; + funcs->setup_windows = dmub_dcn21_setup_windows; + funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done; + funcs->is_phy_init = dmub_dcn21_is_phy_init; + } + break; + + default: + return false; + } + + return true; +} + +enum dmub_status dmub_srv_create(struct dmub_srv *dmub, + const struct dmub_srv_create_params *params) +{ + enum dmub_status status = DMUB_STATUS_OK; + + dmub_memset(dmub, 0, sizeof(*dmub)); + + dmub->funcs = params->funcs; + dmub->user_ctx = params->user_ctx; + dmub->asic = params->asic; + dmub->is_virtual = params->is_virtual; + + /* Setup asic dependent hardware funcs. */ + if (!dmub_srv_hw_setup(dmub, params->asic)) { + status = DMUB_STATUS_INVALID; + goto cleanup; + } + + /* Override (some) hardware funcs based on user params. */ + if (params->hw_funcs) { + if (params->hw_funcs->get_inbox1_rptr) + dmub->hw_funcs.get_inbox1_rptr = + params->hw_funcs->get_inbox1_rptr; + + if (params->hw_funcs->set_inbox1_wptr) + dmub->hw_funcs.set_inbox1_wptr = + params->hw_funcs->set_inbox1_wptr; + + if (params->hw_funcs->is_supported) + dmub->hw_funcs.is_supported = + params->hw_funcs->is_supported; + } + + /* Sanity checks for required hw func pointers. 
*/ + if (!dmub->hw_funcs.get_inbox1_rptr || + !dmub->hw_funcs.set_inbox1_wptr) { + status = DMUB_STATUS_INVALID; + goto cleanup; + } + +cleanup: + if (status == DMUB_STATUS_OK) + dmub->sw_init = true; + else + dmub_srv_destroy(dmub); + + return status; +} + +void dmub_srv_destroy(struct dmub_srv *dmub) +{ + dmub_memset(dmub, 0, sizeof(*dmub)); +} + +enum dmub_status +dmub_srv_calc_region_info(struct dmub_srv *dmub, + const struct dmub_srv_region_params *params, + struct dmub_srv_region_info *out) +{ + struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST]; + struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK]; + struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA]; + struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS]; + struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; + struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; + struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + memset(out, 0, sizeof(*out)); + + out->num_regions = DMUB_NUM_WINDOWS; + + inst->base = 0x0; + inst->top = inst->base + params->inst_const_size; + + data->base = dmub_align(inst->top, 256); + data->top = data->base + params->bss_data_size; + + stack->base = dmub_align(data->top, 256); + stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; + + bios->base = dmub_align(stack->top, 256); + bios->top = bios->base + params->vbios_size; + + mail->base = dmub_align(bios->top, 256); + mail->top = mail->base + DMUB_MAILBOX_SIZE; + + trace_buff->base = dmub_align(mail->top, 256); + trace_buff->top = trace_buff->base + TRACE_BUF_SIZE; + + fw_state->base = dmub_align(trace_buff->top, 256); + + /* Align firmware state to size of cache line. 
*/ + fw_state->top = + fw_state->base + dmub_align(sizeof(struct dmub_fw_state), 64); + + out->fb_size = dmub_align(fw_state->top, 4096); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, + const struct dmub_srv_fb_params *params, + struct dmub_srv_fb_info *out) +{ + uint8_t *cpu_base; + uint64_t gpu_base; + uint32_t i; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + memset(out, 0, sizeof(*out)); + + if (params->region_info->num_regions != DMUB_NUM_WINDOWS) + return DMUB_STATUS_INVALID; + + cpu_base = (uint8_t *)params->cpu_addr; + gpu_base = params->gpu_addr; + + for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { + const struct dmub_region *reg = + ¶ms->region_info->regions[i]; + + out->fb[i].cpu_addr = cpu_base + reg->base; + out->fb[i].gpu_addr = gpu_base + reg->base; + out->fb[i].size = reg->top - reg->base; + } + + out->num_fb = DMUB_NUM_WINDOWS; + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub, + bool *is_supported) +{ + *is_supported = false; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.is_supported) + *is_supported = dmub->hw_funcs.is_supported(dmub); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) +{ + *is_hw_init = false; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_funcs.is_hw_init) + *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, + const struct dmub_srv_hw_params *params) +{ + struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST]; + struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK]; + struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA]; + struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS]; + struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; + struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; + struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; + + struct dmub_rb_init_params rb_params; + struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6; + struct dmub_region inbox1; + + if (!dmub->sw_init) + return DMUB_STATUS_INVALID; + + dmub->fb_base = params->fb_base; + dmub->fb_offset = params->fb_offset; + dmub->psp_version = params->psp_version; + + if (inst_fb && data_fb) { + cw0.offset.quad_part = inst_fb->gpu_addr; + cw0.region.base = DMUB_CW0_BASE; + cw0.region.top = cw0.region.base + inst_fb->size - 1; + + cw1.offset.quad_part = stack_fb->gpu_addr; + cw1.region.base = DMUB_CW1_BASE; + cw1.region.top = cw1.region.base + stack_fb->size - 1; + + if (params->load_inst_const && dmub->hw_funcs.backdoor_load) + dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1); + } + + if (dmub->hw_funcs.reset) + dmub->hw_funcs.reset(dmub); + + if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb && + fw_state_fb) { + cw2.offset.quad_part = data_fb->gpu_addr; + cw2.region.base = DMUB_CW0_BASE + inst_fb->size; + cw2.region.top = cw2.region.base + data_fb->size; + + cw3.offset.quad_part = bios_fb->gpu_addr; + cw3.region.base = DMUB_CW3_BASE; + cw3.region.top = cw3.region.base + bios_fb->size; + + cw4.offset.quad_part = mail_fb->gpu_addr; + cw4.region.base = cw3.region.top + 1; + cw4.region.top = cw4.region.base + mail_fb->size; + + inbox1.base = cw4.region.base; + inbox1.top = cw4.region.top; + + cw5.offset.quad_part = tracebuff_fb->gpu_addr; + cw5.region.base = DMUB_CW5_BASE; + cw5.region.top = cw5.region.base + 
tracebuff_fb->size; + + cw6.offset.quad_part = fw_state_fb->gpu_addr; + cw6.region.base = DMUB_CW6_BASE; + cw6.region.top = cw6.region.base + fw_state_fb->size; + + dmub->fw_state = fw_state_fb->cpu_addr; + + if (dmub->hw_funcs.setup_windows) + dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, + &cw5, &cw6); + + if (dmub->hw_funcs.setup_mailbox) + dmub->hw_funcs.setup_mailbox(dmub, &inbox1); + } + + if (mail_fb) { + dmub_memset(&rb_params, 0, sizeof(rb_params)); + rb_params.ctx = dmub; + rb_params.base_address = mail_fb->cpu_addr; + rb_params.capacity = DMUB_RB_SIZE; + + dmub_rb_init(&dmub->inbox1_rb, &rb_params); + } + + if (dmub->hw_funcs.reset_release) + dmub->hw_funcs.reset_release(dmub); + + dmub->hw_init = true; + + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, + const struct dmub_cmd_header *cmd) +{ + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) + return DMUB_STATUS_OK; + + return DMUB_STATUS_QUEUE_FULL; +} + +enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) +{ + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_cmd_submit(struct dmub_srv *dmub, + const struct dmub_cmd_header *cmd, + uint32_t timeout_us) +{ + uint32_t i = 0; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; ++i) { + dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + if (dmub_rb_push_front(&dmub->inbox1_rb, cmd)) { + dmub->hw_funcs.set_inbox1_wptr(dmub, + dmub->inbox1_rb.wrpt); + return DMUB_STATUS_OK; + } + + udelay(1); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (!dmub->hw_funcs.is_auto_load_done) + return DMUB_STATUS_OK; + + for (i = 0; i <= timeout_us; i += 100) { + if (dmub->hw_funcs.is_auto_load_done(dmub)) + return DMUB_STATUS_OK; + + udelay(100); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i = 0; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + if (!dmub->hw_funcs.is_phy_init) + return DMUB_STATUS_OK; + + for (i = 0; i <= timeout_us; i += 10) { + if (dmub->hw_funcs.is_phy_init(dmub)) + return DMUB_STATUS_OK; + + udelay(10); + } + + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub, + uint32_t timeout_us) +{ + uint32_t i; + + if (!dmub->hw_init) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; ++i) { + dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub); + if (dmub_rb_empty(&dmub->inbox1_rb)) + return DMUB_STATUS_OK; + + udelay(1); + } + + return DMUB_STATUS_TIMEOUT; +} diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 1be6c44fd32f..72b659c63aea 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,23 +134,20 @@ #define PICASSO_A0 0x41 /* DCN1_01 */ #define RAVEN2_A0 0x81 +#define RAVEN2_15D8_REV_E3 0xE3 +#define RAVEN2_15D8_REV_E4 0xE4 #define RAVEN1_F0 0xF0 #define RAVEN_UNKNOWN 0xFF -#define PICASSO_15D8_REV_E3 0xE3 -#define PICASSO_15D8_REV_E4 0xE4 - #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) #define 
ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) -#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < PICASSO_15D8_REV_E3)) -#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && (eChipRev < RAVEN1_F0)) - +#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0)) #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) - +#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \ + || (eChipRev == RAVEN2_15D8_REV_E4)) #define FAMILY_RV 142 /* DCN 1*/ -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define FAMILY_NV 143 /* DCN 2*/ @@ -164,12 +161,9 @@ enum { #define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0) #define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0)) #define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) #define RENOIR_A0 0x91 #define DEVICE_ID_RENOIR_1636 0x1636 // Renoir #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF)) -#endif /* * ASIC chip ID diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index fcc42372b6cf..0b6859189ca7 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -46,12 +46,8 @@ enum dce_version { DCE_VERSION_MAX, DCN_VERSION_1_0, DCN_VERSION_1_01, -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) DCN_VERSION_2_0, -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) DCN_VERSION_2_1, -#endif DCN_VERSION_MAX }; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 18961707db23..9ad49da50a17 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -31,6 +31,8 @@ #define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9 #define DP_BRANCH_DEVICE_ID_00001A 0x00001A #define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1 +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 +#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C enum ddc_result { DDC_RESULT_UNKNOWN = 0, diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index f312834fef50..d51de94e4bc3 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -178,7 +178,8 @@ struct dc_firmware_info { uint32_t default_engine_clk; /* in KHz */ uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */ uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */ - + bool oem_i2c_present; + uint8_t oem_i2c_obj_id; }; diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h new file mode 100644 index 000000000000..f31e6befc8d6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h @@ -0,0 +1,96 @@ +/* +* Copyright 2019 Advanced Micro Devices, Inc. 
+* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +* OTHER DEALINGS IN THE SOFTWARE. +* +* Authors: AMD +* +*/ + +#ifndef __DC_HDCP_TYPES_H__ +#define __DC_HDCP_TYPES_H__ + +enum hdcp_message_id { + HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + HDCP_MESSAGE_ID_READ_BKSV = 0, + /* HDMI is called Ri', DP is called R0' */ + HDCP_MESSAGE_ID_READ_RI_R0, + HDCP_MESSAGE_ID_READ_PJ, + HDCP_MESSAGE_ID_WRITE_AKSV, + HDCP_MESSAGE_ID_WRITE_AINFO, + HDCP_MESSAGE_ID_WRITE_AN, + HDCP_MESSAGE_ID_READ_VH_X, + HDCP_MESSAGE_ID_READ_VH_0, + HDCP_MESSAGE_ID_READ_VH_1, + HDCP_MESSAGE_ID_READ_VH_2, + HDCP_MESSAGE_ID_READ_VH_3, + HDCP_MESSAGE_ID_READ_VH_4, + HDCP_MESSAGE_ID_READ_BCAPS, + HDCP_MESSAGE_ID_READ_BSTATUS, + HDCP_MESSAGE_ID_READ_KSV_FIFO, + HDCP_MESSAGE_ID_READ_BINFO, + + /* HDCP 2.2 */ + + HDCP_MESSAGE_ID_HDCP2VERSION, + HDCP_MESSAGE_ID_RX_CAPS, + HDCP_MESSAGE_ID_WRITE_AKE_INIT, + HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + HDCP_MESSAGE_ID_WRITE_LC_INIT, + HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + HDCP_MESSAGE_ID_READ_RXSTATUS, + HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + + HDCP_MESSAGE_ID_MAX +}; + +enum hdcp_version { + HDCP_Unknown = 0, + HDCP_VERSION_14, + HDCP_VERSION_22, +}; + +enum hdcp_link { + HDCP_LINK_PRIMARY, + HDCP_LINK_SECONDARY +}; + +struct hdcp_protection_message { + enum hdcp_version version; + /* relevant only for DVI */ + enum hdcp_link link; + enum hdcp_message_id msg_id; + uint32_t length; + uint8_t max_retries; + uint8_t *data; +}; + +#endif diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index bb012cb1a9f5..c7fbb9c3ad6b 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -42,7 +42,7 @@ struct aux_payload { bool write; bool mot; uint32_t address; - uint8_t length; + uint32_t length; uint8_t *data; /* * used to return the reply type of the transaction diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 876b0b3e1a9c..4869d4562e4d 100644 --- 
a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -123,6 +123,13 @@ enum dp_test_pattern { DP_TEST_PATTERN_UNSUPPORTED }; +enum dp_test_pattern_color_space { + DP_TEST_PATTERN_COLOR_SPACE_RGB, + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601, + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED +}; + enum dp_panel_mode { /* not required */ DP_PANEL_MODE_DEFAULT, diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 2b219cdb13ad..89a709267019 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -66,12 +66,8 @@ #define DC_LOG_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_ALL_GAMMA(...) pr_debug("[GAMMA]:"__VA_ARGS__) #define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__) -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN3_0) || defined(CONFIG_DRM_AMD_DC_DCN2_0) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) -#endif struct dal_logger; @@ -116,9 +112,7 @@ enum dc_log_type { LOG_PERF_TRACE, LOG_DISPLAYSTATS, LOG_HDMI_RETIMER_REDRIVER, -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT LOG_DSC, -#endif LOG_DWB, LOG_GAMMA_DEBUG, LOG_MAX_HW_POINTS, diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 2d8f14b69117..b52c4d379651 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -154,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y) struct fixed31_32 l_pow_m1; struct fixed31_32 base, div; + struct fixed31_32 base2; if (dc_fixpt_lt(in_x, dc_fixpt_zero)) @@ -163,13 +164,15 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y) dc_fixpt_div(dc_fixpt_one, m2)); base = dc_fixpt_sub(l_pow_m1, c1); - if (dc_fixpt_lt(base, dc_fixpt_zero)) - base = dc_fixpt_zero; - div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1)); - *out_y = dc_fixpt_pow(dc_fixpt_div(base, div), - dc_fixpt_div(dc_fixpt_one, m1)); + base2 = dc_fixpt_div(base, div); + //avoid complex numbers + if (dc_fixpt_lt(base2, dc_fixpt_zero)) + base2 = dc_fixpt_sub(dc_fixpt_zero, base2); + + + *out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1)); } @@ -373,7 +376,42 @@ static struct fixed31_32 translate_from_linear_space( return dc_fixpt_mul(args->arg, args->a1); } -static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) + +static struct fixed31_32 translate_from_linear_space_long( + struct translate_from_linear_space_args *args) +{ + const struct fixed31_32 one = dc_fixpt_from_int(1); + + if (dc_fixpt_lt(one, args->arg)) + return one; + + if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) + return dc_fixpt_sub( + args->a2, + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + dc_fixpt_neg(args->arg), + dc_fixpt_recip(args->gamma)))); + else if (dc_fixpt_le(args->a0, args->arg)) + return dc_fixpt_sub( + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + args->arg, + dc_fixpt_recip(args->gamma))), + args->a2); + else + return dc_fixpt_mul( + args->arg, + args->a1); +} + +static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf) { struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); @@ -384,9 +422,13 @@ static struct fixed31_32 
calculate_gamma22(struct fixed31_32 arg) scratch_gamma_args.a3 = dc_fixpt_zero; scratch_gamma_args.gamma = gamma; + if (use_eetf) + return translate_from_linear_space_long(&scratch_gamma_args); + return translate_from_linear_space(&scratch_gamma_args); } + static struct fixed31_32 translate_to_linear_space( struct fixed31_32 arg, struct fixed31_32 a0, @@ -898,7 +940,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, struct fixed31_32 max_display; struct fixed31_32 min_display; struct fixed31_32 max_content; - struct fixed31_32 min_content; struct fixed31_32 clip = dc_fixpt_one; struct fixed31_32 output; bool use_eetf = false; @@ -912,7 +953,6 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, max_display = dc_fixpt_from_int(fs_params->max_display); min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); max_content = dc_fixpt_from_int(fs_params->max_content); - min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); if (fs_params->min_display > 1000) // cap at 0.1 at the bottom @@ -920,11 +960,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (fs_params->max_display < 100) // cap at 100 at the top max_display = dc_fixpt_from_int(100); - if (fs_params->min_content < fs_params->min_display) - use_eetf = true; - else - min_content = min_display; - + // only max used, we don't adjust min luminance if (fs_params->max_content > fs_params->max_display) use_eetf = true; else @@ -950,7 +986,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) output = dc_fixpt_zero; else - output = calculate_gamma22(scaledX); + output = calculate_gamma22(scaledX, use_eetf); rgb->r = output; rgb->g = output; @@ -1965,10 +2001,28 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, tf_pts->x_point_at_y1_green = 1; tf_pts->x_point_at_y1_blue = 1; - map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axis_x, curve, - MAX_HW_POINTS, tf_pts, - mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); + if (input_tf->tf == TRANSFER_FUNCTION_PQ) { + /* just copy current rgb_regamma into tf_pts */ + struct pwl_float_data_ex *curvePt = curve; + int i = 0; + + while (i <= MAX_HW_POINTS) { + tf_pts->red[i] = curvePt->r; + tf_pts->green[i] = curvePt->g; + tf_pts->blue[i] = curvePt->b; + ++curvePt; + ++i; + } + } else { + //clamps to 0-1 + map_regamma_hw_to_x_user(ramp, coeff, rgb_user, + coordinates_x, axis_x, curve, + MAX_HW_POINTS, tf_pts, + mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); + } + + + if (ramp->type == GAMMA_CUSTOM) apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); @@ -2173,5 +2227,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, rgb_degamma_alloc_fail: return ret; } - - diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ec70c9b12e1a..a94700940fd6 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -122,7 +122,7 @@ static unsigned int calc_v_total_from_refresh( const struct dc_stream_state *stream, unsigned int refresh_in_uhz) { - unsigned int v_total = stream->timing.v_total; + unsigned int v_total; unsigned int frame_duration_in_ns; frame_duration_in_ns = @@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp( current_duration_in_us) * 
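The compute_de_pq() rework in color_gamma.c above no longer clamps the (l^(1/m2) - c1) numerator at zero; it instead mirrors a negative base before taking the fractional power, so pow() never sees an input that would yield a complex result. The same guard in ordinary floating point, with the standard SMPTE ST 2084 constants (illustrative only; the driver works in dc_fixpt 31.32 fixed point):

	#include <math.h>

	/* De-quantize a PQ-coded value; constants per the ST 2084 EOTF. */
	static double de_pq(double x)
	{
		const double m1 = 2610.0 / 16384.0;
		const double m2 = 2523.0 / 4096.0 * 128.0;
		const double c1 = 3424.0 / 4096.0;
		const double c2 = 2413.0 / 4096.0 * 32.0;
		const double c3 = 2392.0 / 4096.0 * 32.0;

		double l = pow(x, 1.0 / m2);
		double base = (l - c1) / (c2 - c3 * l);

		if (base < 0.0)		/* avoid a complex result from pow() */
			base = -base;	/* mirror instead of clamping to zero */

		return pow(base, 1.0 / m1);
	}
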
(stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000); + /* v_total cannot be less than nominal */ + if (v_total < stream->timing.v_total) + v_total = stream->timing.v_total; + in_out_vrr->adjust.v_total_min = v_total; in_out_vrr->adjust.v_total_max = v_total; } @@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); + /* Rounded to the nearest Hz */ + nominal_field_rate_in_uhz = 1000000ULL * + div_u64(nominal_field_rate_in_uhz + 500000, 1000000); + min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -975,13 +983,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, unsigned int *inserted_frames, unsigned int *inserted_duration_in_us) { - struct core_freesync *core_freesync = NULL; - if (mod_freesync == NULL) return; - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - if (vrr->supported) { *v_total_min = vrr->adjust.v_total_min; *v_total_max = vrr->adjust.v_total_max; @@ -996,14 +1000,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate( const struct dc_stream_state *stream) { unsigned long long nominal_field_rate_in_uhz = 0; + unsigned int total = stream->timing.h_total * stream->timing.v_total; - /* Calculate nominal field rate for stream */ + /* Calculate nominal field rate for stream, rounded up to nearest integer */ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); + + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); return nominal_field_rate_in_uhz; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile new file mode 100644 index 000000000000..904424da01b5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile @@ -0,0 +1,33 @@ +# +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'hdcp' sub-module of DAL. 
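mod_freesync_calc_nominal_field_rate() above now divides once by h_total * v_total, and the caller rounds the result to the nearest Hz before deriving the VRR range. The arithmetic in isolation, kept in micro-Hz to avoid fractional rates, with plain 64-bit division where the kernel uses div_u64():

	#include <stdint.h>

	/* Field rate in micro-Hz from pixel clock and totals, rounded to Hz. */
	static uint64_t nominal_field_rate_uhz(uint32_t pix_clk_100hz,
					       uint32_t h_total, uint32_t v_total)
	{
		uint64_t rate = (uint64_t)pix_clk_100hz / 10;	/* kHz, truncated */
		uint64_t total = (uint64_t)h_total * v_total;

		rate *= 1000ULL * 1000ULL * 1000ULL;	/* kHz -> uHz numerator */
		rate /= total;				/* field rate in uHz */

		/* Round to the nearest Hz, still expressed in micro-Hz. */
		return 1000000ULL * ((rate + 500000) / 1000000);
	}
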
+# + +HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \ + hdcp1_execution.o hdcp1_transition.o \ + hdcp2_execution.o hdcp2_transition.o + +AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP)) +#$(info ************ DAL-HDCP_MAKEFILE ************) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c new file mode 100644 index 000000000000..8aa528e874c4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -0,0 +1,509 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static void push_error_status(struct mod_hdcp *hdcp, + enum mod_hdcp_status status) +{ + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + + if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) { + trace->errors[trace->error_count].status = status; + trace->errors[trace->error_count].state_id = hdcp->state.id; + trace->error_count++; + HDCP_ERROR_TRACE(hdcp, status); + } + + if (is_hdcp1(hdcp)) { + hdcp->connection.hdcp1_retry_count++; + } else if (is_hdcp2(hdcp)) { + hdcp->connection.hdcp2_retry_count++; + } +} + +static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) +{ + int i, is_auth_needed = 0; + + /* if all displays on the link don't need authentication, + * hdcp is not desired + */ + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->connection.displays[i].adjust.disable) { + is_auth_needed = 1; + break; + } + } + + return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && + is_auth_needed && + !hdcp->connection.link.adjust.hdcp1.disable; +} + +static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) +{ + int i, is_auth_needed = 0; + + /* if all displays on the link don't need authentication, + * hdcp is not desired + */ + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->connection.displays[i].adjust.disable) { + is_auth_needed = 1; + break; + } + } + + return (hdcp->connection.hdcp2_retry_count < MAX_NUM_OF_ATTEMPTS) && + is_auth_needed && + !hdcp->connection.link.adjust.hdcp2.disable && + !hdcp->connection.is_hdcp2_revoked; +} + +static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input) +{ + enum 
mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_in_initialized_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* initialize transition input */ + memset(input, 0, sizeof(union mod_hdcp_transition_input)); + } else if (is_in_cp_not_desired_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* update topology event if hdcp is not desired */ + status = mod_hdcp_add_display_topology(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_execution(hdcp, + event_ctx, &input->hdcp1); + } else if (is_in_hdcp2_states(hdcp)) { + status = mod_hdcp_hdcp2_execution(hdcp, event_ctx, &input->hdcp2); + } else if (is_in_hdcp2_dp_states(hdcp)) { + status = mod_hdcp_hdcp2_dp_execution(hdcp, + event_ctx, &input->hdcp2); + } +out: + return status; +} + +static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->unexpected_event) + goto out; + + if (is_in_initialized_state(hdcp)) { + if (is_dp_hdcp(hdcp)) + if (is_cp_desired_hdcp2(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A0_DETERMINE_RX_HDCP_CAPABLE); + } else if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (is_cp_desired_hdcp2(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A0_KNOWN_HDCP2_CAPABLE_RX); + } else if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + } else if (is_in_cp_not_desired_state(hdcp)) { + increment_stay_counter(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else if (is_in_hdcp2_states(hdcp)) { + status = mod_hdcp_hdcp2_transition(hdcp, + event_ctx, &input->hdcp2, output); + } else if (is_in_hdcp2_dp_states(hdcp)) { + status = mod_hdcp_hdcp2_dp_transition(hdcp, + event_ctx, &input->hdcp2, output); + } else { + status = MOD_HDCP_STATUS_INVALID_STATE; + } +out: + return status; +} + +static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_hdcp1(hdcp)) { + if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) { + /* TODO - update psp to unify create session failure + * recovery between hdcp1 and 2. 
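The execution()/transition() pair above keeps side effects and policy apart: the first phase performs the DDC/PSP work for the current state and latches per-step results, the second looks only at those latched results to choose the next state. A stripped-down sketch of that two-phase shape, with hypothetical state and input types:

	#include <string.h>

	enum result { UNKNOWN = 0, PASS, FAIL };

	struct inputs  { enum result caps_read; };
	struct machine { int state; struct inputs in; };

	/* Phase 1: do the work for the current state, record outcomes only. */
	static void execute(struct machine *m)
	{
		if (m->state == 0)
			m->in.caps_read = PASS;	/* e.g. a register read worked */
	}

	/* Phase 2: consult recorded outcomes, never redo the I/O. */
	static void transition(struct machine *m)
	{
		if (m->state == 0 && m->in.caps_read == PASS) {
			memset(&m->in, 0, sizeof(m->in)); /* fresh per state */
			m->state = 1;
		}
	}

	static void process_event(struct machine *m)
	{
		execute(m);
		transition(m);
	}
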
+ */ + mod_hdcp_hdcp1_destroy_session(hdcp); + + } + if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else if (is_hdcp2(hdcp)) { + if (hdcp->auth.trans_input.hdcp2.create_session == PASS) { + status = mod_hdcp_hdcp2_destroy_session(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else if (is_in_cp_not_desired_state(hdcp)) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } + +out: + /* stop callback and watchdog requests from previous authentication*/ + output->watchdog_timer_stop = 1; + output->callback_stop = 1; + return status; +} + +static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(output, 0, sizeof(struct mod_hdcp_output)); + + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (current_state(hdcp) != HDCP_UNINITIALIZED) { + HDCP_TOP_RESET_CONN_TRACE(hdcp); + set_state_id(hdcp, output, HDCP_UNINITIALIZED); + } + memset(&hdcp->connection, 0, sizeof(hdcp->connection)); +out: + return status; +} + +/* + * Implementation of functions in mod_hdcp.h + */ +size_t mod_hdcp_get_memory_size(void) +{ + return sizeof(struct mod_hdcp); +} + +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config) +{ + struct mod_hdcp_output output; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(hdcp, 0, sizeof(struct mod_hdcp)); + memset(&output, 0, sizeof(output)); + hdcp->config = *config; + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, &output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_output output; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + memset(&output, 0, sizeof(output)); + status = reset_connection(hdcp, &output); + if (status == MOD_HDCP_STATUS_SUCCESS) + memset(hdcp, 0, sizeof(struct mod_hdcp)); + else + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct 
mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display_container = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* skip inactive display */ + if (display->state != MOD_HDCP_DISPLAY_ACTIVE) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* check existing display container */ + if (get_active_display_at_index(hdcp, display->index)) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* find an empty display container */ + display_container = get_empty_display_container(hdcp); + if (!display_container) { + status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND; + goto out; + } + + /* reset existing authentication status */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* add display to connection */ + hdcp->connection.link = *link; + *display_container = *display; + + /* reset retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication */ + if (current_state(hdcp) != HDCP_INITIALIZED) + set_state_id(hdcp, output, HDCP_INITIALIZED); + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* stop current authentication */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* remove display */ + display->state = MOD_HDCP_DISPLAY_INACTIVE; + + /* clear retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication for remaining displays*/ + if (get_active_display_count(hdcp) > 0) + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, + output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_display_query *query) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + goto out; + } + + /* populate query */ + query->link = &hdcp->connection.link; + query->display = display; + query->trace = &hdcp->connection.trace; + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + if (is_display_encryption_enabled(display)) { + if (is_hdcp1(hdcp)) { + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; + } else if (is_hdcp2(hdcp)) { + if (query->link->adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; + else if (query->link->adjust.hdcp2.force_type == 
MOD_HDCP_FORCE_TYPE_1) + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; + else + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON; + } + } else { + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, + enum mod_hdcp_event event, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status exec_status, trans_status, reset_status, status; + struct mod_hdcp_event_context event_ctx; + + HDCP_EVENT_TRACE(hdcp, event); + memset(output, 0, sizeof(struct mod_hdcp_output)); + memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context)); + event_ctx.event = event; + + /* execute and transition */ + exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input); + trans_status = transition( + hdcp, &event_ctx, &hdcp->auth.trans_input, output); + if (trans_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_SUCCESS; + } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE; + push_error_status(hdcp, status); + } else { + status = exec_status; + push_error_status(hdcp, status); + } + + /* reset authentication if needed */ + if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) { + HDCP_FULL_DDC_TRACE(hdcp); + reset_status = reset_authentication(hdcp, output); + if (reset_status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, reset_status); + } + return status; +} + +enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( + enum signal_type signal) +{ + enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF; + + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + mode = MOD_HDCP_MODE_DEFAULT; + break; + case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_DISPLAY_PORT: + mode = MOD_HDCP_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + mode = MOD_HDCP_MODE_DP_MST; + break; + default: + break; + } + + return mode; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h new file mode 100644 index 000000000000..f98d3d9ecb6d --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -0,0 +1,587 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef HDCP_H_ +#define HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp_log.h" + +#include <drm/drm_hdcp.h> +#include <drm/drm_dp_helper.h> + +enum mod_hdcp_trans_input_result { + UNKNOWN = 0, + PASS, + FAIL +}; + +struct mod_hdcp_transition_input_hdcp1 { + uint8_t bksv_read; + uint8_t bksv_validation; + uint8_t add_topology; + uint8_t create_session; + uint8_t an_write; + uint8_t aksv_write; + uint8_t ainfo_write; + uint8_t bcaps_read; + uint8_t r0p_read; + uint8_t rx_validation; + uint8_t encryption; + uint8_t link_maintenance; + uint8_t ready_check; + uint8_t bstatus_read; + uint8_t max_cascade_check; + uint8_t max_devs_check; + uint8_t device_count_check; + uint8_t ksvlist_read; + uint8_t vp_read; + uint8_t ksvlist_vp_validation; + + uint8_t hdcp_capable_dp; + uint8_t binfo_read_dp; + uint8_t r0p_available_dp; + uint8_t link_integiry_check; + uint8_t reauth_request_check; + uint8_t stream_encryption_dp; +}; + +struct mod_hdcp_transition_input_hdcp2 { + uint8_t hdcp2version_read; + uint8_t hdcp2_capable_check; + uint8_t add_topology; + uint8_t create_session; + uint8_t ake_init_prepare; + uint8_t ake_init_write; + uint8_t rxstatus_read; + uint8_t ake_cert_available; + uint8_t ake_cert_read; + uint8_t ake_cert_validation; + uint8_t stored_km_write; + uint8_t no_stored_km_write; + uint8_t h_prime_available; + uint8_t h_prime_read; + uint8_t pairing_available; + uint8_t pairing_info_read; + uint8_t h_prime_validation; + uint8_t lc_init_prepare; + uint8_t lc_init_write; + uint8_t l_prime_available_poll; + uint8_t l_prime_read; + uint8_t l_prime_validation; + uint8_t eks_prepare; + uint8_t eks_write; + uint8_t enable_encryption; + uint8_t reauth_request_check; + uint8_t rx_id_list_read; + uint8_t device_count_check; + uint8_t rx_id_list_validation; + uint8_t repeater_auth_ack_write; + uint8_t prepare_stream_manage; + uint8_t stream_manage_write; + uint8_t stream_ready_available; + uint8_t stream_ready_read; + uint8_t stream_ready_validation; + + uint8_t rx_caps_read_dp; + uint8_t content_stream_type_write; + uint8_t link_integrity_check_dp; + uint8_t stream_encryption_dp; +}; + +union mod_hdcp_transition_input { + struct mod_hdcp_transition_input_hdcp1 hdcp1; + struct mod_hdcp_transition_input_hdcp2 hdcp2; +}; + +struct mod_hdcp_message_hdcp1 { + uint8_t an[8]; + uint8_t aksv[5]; + uint8_t ainfo; + uint8_t bksv[5]; + uint16_t r0p; + uint8_t bcaps; + uint16_t bstatus; + uint8_t ksvlist[635]; + uint16_t ksvlist_size; + uint8_t vp[20]; + + uint16_t binfo_dp; +}; + +struct mod_hdcp_message_hdcp2 { + uint8_t hdcp2version_hdmi; + uint8_t rxcaps_dp[3]; + uint8_t rxstatus[2]; + + uint8_t ake_init[12]; + uint8_t ake_cert[534]; + uint8_t ake_no_stored_km[129]; + uint8_t ake_stored_km[33]; + uint8_t ake_h_prime[33]; + uint8_t ake_pairing_info[17]; + uint8_t lc_init[9]; + uint8_t lc_l_prime[33]; + uint8_t ske_eks[25]; + uint8_t rx_id_list[177]; // 22 + 5 * 31 + uint16_t rx_id_list_size; + uint8_t repeater_auth_ack[17]; + uint8_t repeater_auth_stream_manage[68]; // 6 + 2 * 31 + uint16_t stream_manage_size; + uint8_t repeater_auth_stream_ready[33]; + uint8_t rxstatus_dp; + uint8_t content_stream_type_dp[2]; +}; + +union mod_hdcp_message { + struct mod_hdcp_message_hdcp1 hdcp1; + struct 
mod_hdcp_message_hdcp2 hdcp2; +}; + +struct mod_hdcp_auth_counters { + uint8_t stream_management_retry_count; +}; + +/* contains values per connection */ +struct mod_hdcp_connection { + struct mod_hdcp_link link; + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; + uint8_t is_repeater; + uint8_t is_km_stored; + uint8_t is_hdcp2_revoked; + struct mod_hdcp_trace trace; + uint8_t hdcp1_retry_count; + uint8_t hdcp2_retry_count; +}; + +/* contains values per authentication cycle */ +struct mod_hdcp_authentication { + uint32_t id; + union mod_hdcp_message msg; + union mod_hdcp_transition_input trans_input; + struct mod_hdcp_auth_counters count; +}; + +/* contains values per state change */ +struct mod_hdcp_state { + uint8_t id; + uint32_t stay_count; +}; + +/* per event in a state */ +struct mod_hdcp_event_context { + enum mod_hdcp_event event; + uint8_t rx_id_list_ready; + uint8_t unexpected_event; +}; + +struct mod_hdcp { + /* per link */ + struct mod_hdcp_config config; + /* per connection */ + struct mod_hdcp_connection connection; + /* per authentication attempt */ + struct mod_hdcp_authentication auth; + /* per state in an authentication */ + struct mod_hdcp_state state; + /* reserved memory buffer */ + uint8_t buf[2025]; +}; + +enum mod_hdcp_initial_state_id { + HDCP_UNINITIALIZED = 0x0, + HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED, + HDCP_INITIALIZED, + HDCP_CP_NOT_DESIRED, + HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED +}; + +enum mod_hdcp_hdcp1_state_id { + HDCP1_STATE_START = HDCP_INITIAL_STATE_END, + H1_A0_WAIT_FOR_ACTIVE_RX, + H1_A1_EXCHANGE_KSVS, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER, + H1_A45_AUTHENTICATED, + H1_A8_WAIT_FOR_READY, + H1_A9_READ_KSV_LIST, + HDCP1_STATE_END = H1_A9_READ_KSV_LIST +}; + +enum mod_hdcp_hdcp1_dp_state_id { + HDCP1_DP_STATE_START = HDCP1_STATE_END, + D1_A0_DETERMINE_RX_HDCP_CAPABLE, + D1_A1_EXCHANGE_KSVS, + D1_A23_WAIT_FOR_R0_PRIME, + D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER, + D1_A4_AUTHENTICATED, + D1_A6_WAIT_FOR_READY, + D1_A7_READ_KSV_LIST, + HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST, +}; + +enum mod_hdcp_hdcp2_state_id { + HDCP2_STATE_START = HDCP1_DP_STATE_END, + H2_A0_KNOWN_HDCP2_CAPABLE_RX, + H2_A1_SEND_AKE_INIT, + H2_A1_VALIDATE_AKE_CERT, + H2_A1_SEND_NO_STORED_KM, + H2_A1_READ_H_PRIME, + H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME, + H2_A1_SEND_STORED_KM, + H2_A1_VALIDATE_H_PRIME, + H2_A2_LOCALITY_CHECK, + H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER, + H2_ENABLE_ENCRYPTION, + H2_A5_AUTHENTICATED, + H2_A6_WAIT_FOR_RX_ID_LIST, + H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK, + H2_A9_SEND_STREAM_MANAGEMENT, + H2_A9_VALIDATE_STREAM_READY, + HDCP2_STATE_END = H2_A9_VALIDATE_STREAM_READY, +}; + +enum mod_hdcp_hdcp2_dp_state_id { + HDCP2_DP_STATE_START = HDCP2_STATE_END, + D2_A0_DETERMINE_RX_HDCP_CAPABLE, + D2_A1_SEND_AKE_INIT, + D2_A1_VALIDATE_AKE_CERT, + D2_A1_SEND_NO_STORED_KM, + D2_A1_READ_H_PRIME, + D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME, + D2_A1_SEND_STORED_KM, + D2_A1_VALIDATE_H_PRIME, + D2_A2_LOCALITY_CHECK, + D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER, + D2_SEND_CONTENT_STREAM_TYPE, + D2_ENABLE_ENCRYPTION, + D2_A5_AUTHENTICATED, + D2_A6_WAIT_FOR_RX_ID_LIST, + D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK, + D2_A9_SEND_STREAM_MANAGEMENT, + D2_A9_VALIDATE_STREAM_READY, + HDCP2_DP_STATE_END = D2_A9_VALIDATE_STREAM_READY, + HDCP_STATE_END = HDCP2_DP_STATE_END, +}; + +/* hdcp1 executions and transitions */ +typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp); +uint8_t mod_hdcp_execute_and_set( + 
mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str); +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); + +/* hdcp2 executions and transitions */ +enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input); +enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input); +enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output); + +/* log functions */ +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size); +/* TODO: add adjustment log */ + +/* psp functions */ +enum mod_hdcp_status mod_hdcp_add_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_remove_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status); +enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption( + struct mod_hdcp *hdcp); +enum mod_hdcp_status 
mod_hdcp_hdcp2_prepare_stream_management( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status); + +/* ddc functions */ +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); + +/* hdcp version helpers */ +static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || + hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT); +} + +/* hdcp state helpers */ +static inline uint8_t current_state(struct mod_hdcp *hdcp) +{ + return hdcp->state.id; +} + +static inline void set_state_id(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output, uint8_t id) +{ + memset(&hdcp->state, 0, sizeof(hdcp->state)); + hdcp->state.id = id; + /* callback timer should be reset per state */ + output->callback_stop = 1; + output->watchdog_timer_stop = 1; + HDCP_NEXT_STATE_TRACE(hdcp, id, output); +} + +static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_STATE_START && + current_state(hdcp) <= HDCP1_STATE_END); +} + +static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_DP_STATE_START && + current_state(hdcp) <= HDCP1_DP_STATE_END); +} + +static inline uint8_t is_in_hdcp2_states(struct mod_hdcp *hdcp) +{ + return 
(current_state(hdcp) > HDCP2_STATE_START && + current_state(hdcp) <= HDCP2_STATE_END); +} + +static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP2_DP_STATE_START && + current_state(hdcp) <= HDCP2_DP_STATE_END); +} + +static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp) +{ + return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp)); +} + +static inline uint8_t is_hdcp2(struct mod_hdcp *hdcp) +{ + return (is_in_hdcp2_states(hdcp) || is_in_hdcp2_dp_states(hdcp)); +} + +static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_CP_NOT_DESIRED; +} + +static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_INITIALIZED; +} + +/* transition operation helpers */ +static inline void increment_stay_counter(struct mod_hdcp *hdcp) +{ + hdcp->state.stay_count++; +} + +static inline void fail_and_restart_in_ms(uint16_t time, + enum mod_hdcp_status *status, + struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; + output->watchdog_timer_needed = 0; + output->watchdog_timer_delay = 0; + *status = MOD_HDCP_STATUS_RESET_NEEDED; +} + +static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; +} + +static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time, + struct mod_hdcp_output *output) +{ + output->watchdog_timer_needed = 1; + output->watchdog_timer_delay = time; +} + +/* connection topology helpers */ +static inline uint8_t is_display_active(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE; +} + +static inline uint8_t is_display_added(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; +} + +static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; +} + +static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_active(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline struct mod_hdcp_display *get_first_added_display( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_active_display_at_index( + struct mod_hdcp *hdcp, uint8_t index) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (hdcp->connection.displays[i].index == index && + is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_empty_display_container( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if 
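The is_in_*_states() helpers above depend on the state enums being chained: each sub-machine's *_STATE_START aliases the previous machine's *_STATE_END, so membership in a sub-machine is two integer comparisons with an exclusive lower bound. The scheme in miniature:

	/* Contiguous, chained ranges: each START aliases the previous END. */
	enum {
		INIT_START = 0,
		S_INITIALIZED,
		INIT_END = S_INITIALIZED,

		A_START = INIT_END,	/* machine A occupies (A_START, A_END] */
		A_STEP1,
		A_STEP2,
		A_END = A_STEP2,

		B_START = A_END,	/* machine B follows without a gap */
		B_STEP1,
		B_END = B_STEP1,
	};

	static int in_a(int s) { return s > A_START && s <= A_END; }
	static int in_b(int s) { return s > B_START && s <= B_END; }
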
(!is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline void reset_retry_counts(struct mod_hdcp *hdcp) +{ + hdcp->connection.hdcp1_retry_count = 0; + hdcp->connection.hdcp2_retry_count = 0; +} + +#endif /* HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c new file mode 100644 index 000000000000..04845e43df15 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -0,0 +1,529 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) +{ + uint64_t n = 0; + uint8_t count = 0; + + memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t)); + + while (n) { + count++; + n &= (n - 1); + } + return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; +} + +static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) +{ + if (is_dp_hdcp(hdcp)) + return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_READY) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; + return (hdcp->auth.msg.hdcp1.bcaps & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bcaps & DP_BCAPS_HDCP_CAPABLE) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE; +} + +static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { + status = (hdcp->auth.msg.hdcp1.bstatus & + DP_BSTATUS_R0_PRIME_READY) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING; + } else { + status = MOD_HDCP_STATUS_INVALID_OPERATION; + } + return status; +} + +static inline enum mod_hdcp_status check_link_integrity_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & + DP_BSTATUS_LINK_FAILURE) ? + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_reauthentication_request_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_REAUTH_REQ) ? 
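validate_bksv() above encodes the HDCP 1.x rule that a well-formed 40-bit KSV has exactly 20 bits set; the n &= (n - 1) loop is Kernighan's bit count, clearing one set bit per pass. A stand-alone version that assembles the value byte by byte (the driver memcpy()s sizeof(uint64_t) from the 5-byte field, which appears to rely on the neighbouring message bytes still being zeroed at that point):

	#include <stdint.h>

	/* An HDCP 1.x KSV is valid iff exactly 20 of its 40 bits are set. */
	static int ksv_is_valid(const uint8_t ksv[5])
	{
		uint64_t n = 0;
		int count = 0;

		for (int i = 0; i < 5; i++)
			n |= (uint64_t)ksv[i] << (8 * i);

		while (n) {
			count++;
			n &= n - 1;	/* clear the lowest set bit */
		}
		return count == 20;
	}
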
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp >> 8) + ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE + : MOD_HDCP_STATUS_SUCCESS; + else + status = DRM_HDCP_MAX_CASCADE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus >> 8) + ? MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE + : MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.binfo_dp) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = DRM_HDCP_MAX_DEVICE_EXCEEDED(hdcp->auth.msg.hdcp1.bstatus) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ + return is_dp_hdcp(hdcp) ? + DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.binfo_dp) : + DRM_HDCP_NUM_DOWNSTREAM(hdcp->auth.msg.hdcp1.bstatus); +} + +static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ + /* device count must be greater than or equal to tracked hdcp displays */ + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, + &input->add_topology, &status, + hdcp, "add_topology")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session, + &input->create_session, &status, + hdcp, "create_session")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_an, + &input->an_write, &status, + hdcp, "an_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv, + &input->aksv_write, &status, + hdcp, "aksv_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(validate_bksv, + &input->bksv_validation, &status, + hdcp, "bksv_validation")) + goto out; + if (hdcp->auth.msg.hdcp1.ainfo) { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo, + &input->ainfo_write, &status, + hdcp, "ainfo_write")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status computations_validate_rx_test_for_repeater( + struct mod_hdcp *hdcp, + struct mod_hdcp_event_context 
*event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p, + &input->r0p_read, &status, + hdcp, "r0p_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx, + &input->rx_validation, &status, + hdcp, "rx_validation")) + goto out; + if (hdcp->connection.is_repeater) { + if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance, + &input->link_maintenance, &status, + hdcp, "link_maintenance")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_ksv_ready, + &input->ready_check, &status, + hdcp, "ready_check")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + uint8_t device_count; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo, + &input->binfo_read_dp, &status, + hdcp, "binfo_read_dp")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_no_max_cascade, + &input->max_cascade_check, &status, + hdcp, "max_cascade_check")) + goto out; + if 
(!mod_hdcp_execute_and_set(check_no_max_devs, + &input->max_devs_check, &status, + hdcp, "max_devs_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_device_count, + &input->device_count_check, &status, + hdcp, "device_count_check")) + goto out; + device_count = get_device_count(hdcp); + hdcp->auth.msg.hdcp1.ksvlist_size = device_count*5; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist, + &input->ksvlist_read, &status, + hdcp, "ksvlist_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp, + &input->vp_read, &status, + hdcp, "vp_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp, + &input->ksvlist_vp_validation, &status, + hdcp, "ksvlist_vp_validation")) + goto out; + if (input->encryption != PASS) + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp, + &input->hdcp_capable_dp, &status, + hdcp, "hdcp_capable_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_r0p_available_dp, + &input->r0p_available_dp, &status, + hdcp, "r0p_available_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; +out: + return status; +} + +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str) +{ + *status = func(hdcp); + if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) { + HDCP_INPUT_PASS_TRACE(hdcp, str); + *flag = PASS; + } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) { + 
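+ /* [Editor's note, not part of the original patch.] This helper is the glue
+  * between execution and transition: it runs one step, latches the result
+  * into a tri-state input flag and traces only on the first change, so
+  * repeated polling does not spam the log. A minimal standalone model of
+  * the pattern (all names below are illustrative, not from this patch):
+  *
+  *   typedef int (*step_fn)(void *ctx);
+  *   enum { UNSET = 0, PASS, FAIL };
+  *
+  *   static bool run_step(step_fn f, void *ctx, uint8_t *flag, int *status)
+  *   {
+  *           *status = f(ctx);
+  *           *flag = (*status == 0) ? PASS : FAIL;  latch the outcome
+  *           return *status == 0;                   false stops the chain
+  *   }
+  *
+  * The transition functions then branch purely on the latched flags rather
+  * than on live device state, keeping the two halves independently testable.
+  */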
HDCP_INPUT_FAIL_TRACE(hdcp, str); + *flag = FAIL; + } + return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + status = wait_for_active_rx(hdcp, event_ctx, input); + break; + case H1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater(hdcp, + event_ctx, input); + break; + case H1_A45_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case H1_A8_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case H1_A9_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} + +extern enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); + break; + case D1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + status = wait_for_r0_prime_dp(hdcp, event_ctx, input); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater( + hdcp, event_ctx, input); + break; + case D1_A4_AUTHENTICATED: + status = authenticated_dp(hdcp, event_ctx, input); + break; + case D1_A6_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case D1_A7_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c new file mode 100644 index 000000000000..136b8011ff3f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -0,0 +1,307 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + if (input->bksv_read != PASS || input->bcaps_read != PASS) { + /* 1A-04: repeatedly attempts on port access failure */ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS); + break; + case H1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(300, output); + set_state_id(hdcp, output, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + if (input->bcaps_read != PASS || + input->r0p_read != PASS || + input->rx_validation != PASS || + (!conn->is_repeater && input->encryption != PASS)) { + /* 1A-06: consider invalid r0' a failure */ + /* 1A-08: consider bksv listed in SRM a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + callback_in_ms(0, output); + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case H1_A45_AUTHENTICATED: + if (input->link_maintenance != PASS) { + /* 1A-07: consider invalid ri' a failure */ + /* 1A-07a: consider read ri' not returned a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + case H1_A8_WAIT_FOR_READY: + if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-03: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue ksv list READY polling*/ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A9_READ_KSV_LIST); + break; + case H1_A9_READ_KSV_LIST: + if (input->bstatus_read != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS || + input->device_count_check != PASS || + input->ksvlist_read != PASS || + input->vp_read != PASS || + input->ksvlist_vp_validation != PASS || + input->encryption != PASS) { + /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */ 
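+ /* [Editor's note, not part of the original patch.] The *_in_ms() helpers
+  * used throughout this table never sleep; they fill the mod_hdcp_output
+  * request that the caller turns into delayed work. Assuming fields roughly
+  * like the following (verify against mod_hdcp.h):
+  *
+  *   struct mod_hdcp_output {
+  *           uint8_t  callback_needed, callback_stop;
+  *           uint8_t  watchdog_timer_needed, watchdog_timer_stop;
+  *           uint16_t callback_delay;        ms until next execution pass
+  *           uint16_t watchdog_timer_delay;  ms until a timeout event
+  *   };
+  *
+  * callback_in_ms(0, output) therefore means "run the next state on the
+  * next work-queue pass", not "run it synchronously", and
+  * fail_and_restart_in_ms() is a reset request plus such a callback.
+  */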
+ /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-04: consider invalid v' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + if (input->bcaps_read != PASS) { + /* 1A-04: no authentication on bcaps read failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->hdcp_capable_dp != PASS) { + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS); + break; + case D1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + if (input->bstatus_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->r0p_available_dp != PASS) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + if (input->r0p_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->rx_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1A-06: consider invalid r0' a failure + * after 3 attempts. 
+ * 1A-08: consider bksv listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if ((!conn->is_repeater && input->encryption != PASS) || + (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY); + } else { + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case D1_A4_AUTHENTICATED: + if (input->link_integiry_check != PASS || + input->reauth_request_check != PASS) { + /* 1A-07: restart hdcp on a link integrity failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + break; + case D1_A6_WAIT_FOR_READY: + if (input->link_integiry_check == FAIL || + input->reauth_request_check == FAIL) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-04: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A7_READ_KSV_LIST); + break; + case D1_A7_READ_KSV_LIST: + if (input->binfo_read_dp != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS) { + /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->device_count_check != PASS) { + /* + * some slow dongle doesn't update + * device count as soon as downstream is connected. + * give it more time to react. + */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(1000, &status, output); + break; + } else if (input->ksvlist_read != PASS || + input->vp_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ksvlist_vp_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1B-05: consider invalid v' a failure + * after 3 attempts. + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if (input->encryption != PASS || + (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + default: + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c new file mode 100644 index 000000000000..f730b94ac3c0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -0,0 +1,886 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/delay.h> + +#include "hdcp.h" + +static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp) +{ + uint8_t is_ready = 0; + + if (is_dp_hdcp(hdcp)) + is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0; + else + is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) && + (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0; + return is_ready ? MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) && + HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + else + status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + return status; +} + +static inline enum mod_hdcp_status check_reauthentication_request( + struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status ret = MOD_HDCP_STATUS_SUCCESS; + + if (is_dp_hdcp(hdcp)) + ret = HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus_dp) ? + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : + MOD_HDCP_STATUS_SUCCESS; + else + ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ? + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST : + MOD_HDCP_STATUS_SUCCESS; + return ret; +} + +static inline enum mod_hdcp_status check_link_integrity_failure_dp( + struct mod_hdcp *hdcp) +{ + return HDCP_2_2_DP_RXSTATUS_LINK_FAILED(hdcp->auth.msg.hdcp2.rxstatus_dp) ? + MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint16_t size; + + if (is_dp_hdcp(hdcp)) { + status = MOD_HDCP_STATUS_SUCCESS; + } else { + status = mod_hdcp_read_rxstatus(hdcp); + if (status == MOD_HDCP_STATUS_SUCCESS) { + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; + status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ?
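+ /* [Editor's note, not part of the original patch.] For HDMI, the two
+  * RxStatus bytes multiplex a 10-bit message size with the status bits; per
+  * drm_hdcp.h (from memory: MSG_SZ_HI(x) is x & 0x3, READY is BIT(2),
+  * REAUTH_REQ is BIT(3), all taken from rxstatus[1]):
+  *
+  *   uint16_t size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rxstatus[1]) << 8 |
+  *                   rxstatus[0];
+  *
+  * "Message available" is then inferred by comparing size against the
+  * expected message struct, e.g. size == sizeof(ake_cert) for AKE_Send_Cert.
+  * DP has no size field; dedicated per-message bits in the DPCD RxStatus
+  * register serve instead, which is why the DP branch above returns success
+  * directly and leaves pacing to the transition table's timers.
+  */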
+ MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING; + } + } + return status; +} + +static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (is_dp_hdcp(hdcp)) { + status = HDCP_2_2_DP_RXSTATUS_H_PRIME(hdcp->auth.msg.hdcp2.rxstatus_dp) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; + } else { + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; + status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING; + } +out: + return status; +} + +static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (is_dp_hdcp(hdcp)) { + status = HDCP_2_2_DP_RXSTATUS_PAIRING(hdcp->auth.msg.hdcp2.rxstatus_dp) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; + } else { + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; + status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING; + } +out: + return status; +} + +static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + uint16_t max_wait = 20; // units of ms + uint16_t num_polls = 5; + uint16_t wait_time = max_wait / num_polls; + + if (is_dp_hdcp(hdcp)) + status = MOD_HDCP_STATUS_INVALID_OPERATION; + else + for (; num_polls; num_polls--) { + msleep(wait_time); + + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + break; + + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; + status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING; + if (status == MOD_HDCP_STATUS_SUCCESS) + break; + } + return status; +} + +static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + uint8_t size; + + if (is_dp_hdcp(hdcp)) { + status = MOD_HDCP_STATUS_INVALID_OPERATION; + } else { + status = mod_hdcp_read_rxstatus(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; + status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING; + } +out: + return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ + return HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) + + (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4); +} + +static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ + /* device count must be greater than or equal to tracked hdcp displays */ + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? 
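+ /* [Editor's note, not part of the original patch.] poll_l_prime_available()
+  * above slices its 20ms budget into five 4ms sleeps because HDCP 2.2 (as I
+  * recall the spec) expects L' to be readable within roughly 20ms of LC_Init
+  * on HDMI; a single msleep(20) would always burn the whole window. The
+  * idiom, reduced to its shape:
+  *
+  *   for (i = 0; i < num_polls; i++) {
+  *           msleep(wait_time);               wait_time = budget / num_polls
+  *           if (message_size_matches(hdcp))  hypothetical size check
+  *                   return MOD_HDCP_STATUS_SUCCESS;
+  *   }
+  *   return MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING;
+  */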
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static uint8_t process_rxstatus(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + enum mod_hdcp_status *status) +{ + if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxstatus, + &input->rxstatus_read, status, + hdcp, "rxstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_reauthentication_request, + &input->reauth_request_check, status, + hdcp, "reauth_request_check")) + goto out; + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(check_link_integrity_failure_dp, + &input->link_integrity_check_dp, status, + hdcp, "link_integrity_check_dp")) + goto out; + } + if (hdcp->connection.is_repeater) + if (check_receiver_id_list_ready(hdcp) == + MOD_HDCP_STATUS_SUCCESS) { + HDCP_INPUT_PASS_TRACE(hdcp, "rx_id_list_ready"); + event_ctx->rx_id_list_ready = 1; + if (is_dp_hdcp(hdcp)) + hdcp->auth.msg.hdcp2.rx_id_list_size = + sizeof(hdcp->auth.msg.hdcp2.rx_id_list); + else + hdcp->auth.msg.hdcp2.rx_id_list_size = + HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 | + hdcp->auth.msg.hdcp2.rxstatus[0]; + } +out: + return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version, + &input->hdcp2version_read, &status, + hdcp, "hdcp2version_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp2_capable, + &input->hdcp2_capable_check, &status, + hdcp, "hdcp2_capable")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, + &input->add_topology, &status, + hdcp, "add_topology")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session, + &input->create_session, &status, + hdcp, "create_session")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_ake_init, + &input->ake_init_prepare, &status, + hdcp, "ake_init_prepare")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_ake_init, + &input->ake_init_write, &status, + hdcp, "ake_init_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status validate_ake_cert(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set(check_ake_cert_available, + &input->ake_cert_available, &status, + hdcp, "ake_cert_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_ake_cert, + &input->ake_cert_read, &status, + hdcp, "ake_cert_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_ake_cert, + 
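+ /* [Editor's note, not part of the original patch.] process_rxstatus() above
+  * is the common funnel for post-AKE events: every handler reachable via
+  * CP_IRQ first re-reads RxStatus, re-evaluates REAUTH_REQ and (on DP)
+  * LINK_FAILED, and records rx_id_list_ready together with the pending list
+  * size. Later states only consult event_ctx->rx_id_list_ready, so the
+  * "repeater pushed a new receiver ID list" case can preempt whatever step
+  * was in flight without each handler re-implementing the detection.
+  */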
&input->ake_cert_validation, &status, + hdcp, "ake_cert_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_no_stored_km(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_no_stored_km, + &input->no_stored_km_write, &status, + hdcp, "no_stored_km_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_h_prime(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(check_h_prime_available, + &input->h_prime_available, &status, + hdcp, "h_prime_available")) + goto out; + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime, + &input->h_prime_read, &status, + hdcp, "h_prime_read")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_pairing_info_and_validate_h_prime( + struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(check_pairing_info_available, + &input->pairing_available, &status, + hdcp, "pairing_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_pairing_info, + &input->pairing_info_read, &status, + hdcp, "pairing_info_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime, + &input->h_prime_validation, &status, + hdcp, "h_prime_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_stored_km(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_stored_km, + &input->stored_km_write, &status, + hdcp, "stored_km_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status validate_h_prime(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(check_h_prime_available, + &input->h_prime_available, &status, + hdcp, "h_prime_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_h_prime, + &input->h_prime_read, &status, + hdcp, "h_prime_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_h_prime, + &input->h_prime_validation, &status, + hdcp, 
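+ /* [Editor's note, not part of the original patch.] The H-prime handlers
+  * exist in two flavors because AKE has two arms. With no stored km the
+  * receiver gets 1s (2s with adjust->hdcp2.increase_h_prime_timeout) to
+  * derive H' and must also return pairing info, which the PSP caches; the
+  * next authentication can then take the stored-km arm with only a 200ms H'
+  * watchdog. The selection, made later in the transition table, reduces to:
+  *
+  *   if (conn->is_km_stored && !adjust->hdcp2.force_no_stored_km)
+  *           next_state = SEND_STORED_KM;     fast path, 200ms watchdog
+  *   else
+  *           next_state = SEND_NO_STORED_KM;  full exchange + pairing info
+  */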
"h_prime_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, + &input->lc_init_prepare, &status, + hdcp, "lc_init_prepare")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, + &input->lc_init_write, &status, + hdcp, "lc_init_write")) + goto out; + if (is_dp_hdcp(hdcp)) + msleep(16); + else + if (!mod_hdcp_execute_and_set(poll_l_prime_available, + &input->l_prime_available_poll, &status, + hdcp, "l_prime_available_poll")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime, + &input->l_prime_read, &status, + hdcp, "l_prime_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, + &input->l_prime_validation, &status, + hdcp, "l_prime_validation")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status exchange_ks_and_test_for_repeater(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_eks, + &input->eks_prepare, &status, + hdcp, "eks_prepare")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_eks, + &input->eks_write, &status, + hdcp, "eks_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status enable_encryption(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) { + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_enable_encryption, + &input->enable_encryption, &status, + hdcp, "enable_encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp2_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_rx_id_list(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if 
(event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (!event_ctx->rx_id_list_ready) { + status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY; + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status verify_rx_id_list_and_send_ack(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_rx_id_list, + &input->rx_id_list_read, + &status, hdcp, "receiver_id_list_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_device_count, + &input->device_count_check, + &status, hdcp, "device_count_check")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_rx_id_list, + &input->rx_id_list_validation, + &status, hdcp, "rx_id_list_validation")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_repeater_auth_ack, + &input->repeater_auth_ack_write, + &status, hdcp, "repeater_auth_ack_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_stream_management(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) { + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) + goto out; + } + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_stream_management, + &input->prepare_stream_manage, + &status, hdcp, "prepare_stream_manage")) + goto out; + + if (!mod_hdcp_execute_and_set(mod_hdcp_write_stream_manage, + &input->stream_manage_write, + &status, hdcp, "stream_manage_write")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + if (event_ctx->event == MOD_HDCP_EVENT_CPIRQ) { + process_rxstatus(hdcp, event_ctx, input, &status); + goto out; + } + + if (is_hdmi_dvi_sl_hdcp(hdcp)) { + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (event_ctx->rx_id_list_ready) { + goto out; + } + } + if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set(check_stream_ready_available, + &input->stream_ready_available, + &status, hdcp, "stream_ready_available")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_stream_ready, + &input->stream_ready_read, + &status, hdcp, 
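+ /* [Editor's note, not part of the original patch.]
+  * verify_rx_id_list_and_send_ack() above folds HDCP 2.2 repeater steps A7
+  * and A8 into one pass: the PSP validates the list's V against the locally
+  * computed value, enforces seq_num_V monotonicity and SRM revocation, and
+  * on success produces the ack that (per the spec, as I recall) must reach
+  * the repeater within 2s of READY being asserted -- which is why the
+  * transition table drives this state with callback_in_ms(0, ...) instead
+  * of a relaxed poll.
+  */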
"stream_ready_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_stream_ready, + &input->stream_ready_validation, + &status, hdcp, "stream_ready_validation")) + goto out; + +out: + return status; +} + +static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_rxcaps, + &input->rx_caps_read_dp, + &status, hdcp, "rx_caps_read_dp")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp2_capable, + &input->hdcp2_capable_check, &status, + hdcp, "hdcp2_capable_check")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status send_content_stream_type_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!process_rxstatus(hdcp, event_ctx, input, &status)) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_content_type, + &input->content_stream_type_write, &status, + hdcp, "content_stream_type_write")) + goto out; +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case H2_A0_KNOWN_HDCP2_CAPABLE_RX: + status = known_hdcp2_capable_rx(hdcp, event_ctx, input); + break; + case H2_A1_SEND_AKE_INIT: + status = send_ake_init(hdcp, event_ctx, input); + break; + case H2_A1_VALIDATE_AKE_CERT: + status = validate_ake_cert(hdcp, event_ctx, input); + break; + case H2_A1_SEND_NO_STORED_KM: + status = send_no_stored_km(hdcp, event_ctx, input); + break; + case H2_A1_READ_H_PRIME: + status = read_h_prime(hdcp, event_ctx, input); + break; + case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + status = read_pairing_info_and_validate_h_prime(hdcp, + event_ctx, input); + break; + case H2_A1_SEND_STORED_KM: + status = send_stored_km(hdcp, event_ctx, input); + break; + case H2_A1_VALIDATE_H_PRIME: + status = validate_h_prime(hdcp, event_ctx, input); + break; + case H2_A2_LOCALITY_CHECK: + status = locality_check(hdcp, event_ctx, input); + break; + case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + status = exchange_ks_and_test_for_repeater(hdcp, event_ctx, input); + break; + case H2_ENABLE_ENCRYPTION: + status = enable_encryption(hdcp, event_ctx, input); + break; + case H2_A5_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case H2_A6_WAIT_FOR_RX_ID_LIST: + status = wait_for_rx_id_list(hdcp, event_ctx, input); + break; + case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input); + break; + case H2_A9_SEND_STREAM_MANAGEMENT: + status = send_stream_management(hdcp, event_ctx, input); + break; + case H2_A9_VALIDATE_STREAM_READY: + status = validate_stream_ready(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_dp_execution(struct mod_hdcp *hdcp, + struct 
mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case D2_A0_DETERMINE_RX_HDCP_CAPABLE: + status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); + break; + case D2_A1_SEND_AKE_INIT: + status = send_ake_init(hdcp, event_ctx, input); + break; + case D2_A1_VALIDATE_AKE_CERT: + status = validate_ake_cert(hdcp, event_ctx, input); + break; + case D2_A1_SEND_NO_STORED_KM: + status = send_no_stored_km(hdcp, event_ctx, input); + break; + case D2_A1_READ_H_PRIME: + status = read_h_prime(hdcp, event_ctx, input); + break; + case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + status = read_pairing_info_and_validate_h_prime(hdcp, + event_ctx, input); + break; + case D2_A1_SEND_STORED_KM: + status = send_stored_km(hdcp, event_ctx, input); + break; + case D2_A1_VALIDATE_H_PRIME: + status = validate_h_prime(hdcp, event_ctx, input); + break; + case D2_A2_LOCALITY_CHECK: + status = locality_check(hdcp, event_ctx, input); + break; + case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + status = exchange_ks_and_test_for_repeater(hdcp, + event_ctx, input); + break; + case D2_SEND_CONTENT_STREAM_TYPE: + status = send_content_stream_type_dp(hdcp, event_ctx, input); + break; + case D2_ENABLE_ENCRYPTION: + status = enable_encryption(hdcp, event_ctx, input); + break; + case D2_A5_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case D2_A6_WAIT_FOR_RX_ID_LIST: + status = wait_for_rx_id_list(hdcp, event_ctx, input); + break; + case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + status = verify_rx_id_list_and_send_ack(hdcp, event_ctx, input); + break; + case D2_A9_SEND_STREAM_MANAGEMENT: + status = send_stream_management(hdcp, event_ctx, input); + break; + case D2_A9_VALIDATE_STREAM_READY: + status = validate_stream_ready(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c new file mode 100644 index 000000000000..e8043c903a84 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c @@ -0,0 +1,674 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case H2_A0_KNOWN_HDCP2_CAPABLE_RX: + if (input->hdcp2version_read != PASS || + input->hdcp2_capable_check != PASS) { + adjust->hdcp2.disable = 1; + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_SEND_AKE_INIT); + } + break; + case H2_A1_SEND_AKE_INIT: + if (input->add_topology != PASS || + input->create_session != PASS || + input->ake_init_prepare != PASS) { + /* out of sync with psp state */ + adjust->hdcp2.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ake_init_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_VALIDATE_AKE_CERT); + break; + case H2_A1_VALIDATE_AKE_CERT: + if (input->ake_cert_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-08: consider ake timeout a failure */ + /* some hdmi receivers are not ready for HDCP + * immediately after video becomes active, + * delay 1s before retry on first HDCP message + * timeout. + */ + fail_and_restart_in_ms(1000, &status, output); + } else { + /* continue ake cert polling*/ + callback_in_ms(10, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->ake_cert_read != PASS || + input->ake_cert_validation != PASS) { + /* + * 1A-09: consider invalid ake cert a failure + * 1A-10: consider receiver id listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_km_stored && + !adjust->hdcp2.force_no_stored_km) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_SEND_STORED_KM); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_SEND_NO_STORED_KM); + } + break; + case H2_A1_SEND_NO_STORED_KM: + if (input->no_stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (adjust->hdcp2.increase_h_prime_timeout) + set_watchdog_in_ms(hdcp, 2000, output); + else + set_watchdog_in_ms(hdcp, 1000, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_READ_H_PRIME); + break; + case H2_A1_READ_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-11-3: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue h' polling */ + callback_in_ms(100, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME); + break; + case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + if (input->pairing_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-12: consider pairing info timeout + * a failure + */ + fail_and_restart_in_ms(0, &status, output); + } else 
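+ /* [Editor's note, not part of the original patch.] The "1A-xx" and "1B-xx"
+  * tags in these branches are HDCP 2.2 compliance-test item numbers, which
+  * is why several delays look arbitrary: each encodes how the CTS analyzer
+  * measures a timeout. The recurring branch shape is:
+  *
+  *   if (message not yet available)
+  *           watchdog fired? -> fail_and_restart_in_ms(...)
+  *           otherwise       -> callback_in_ms(poll interval) + stay count
+  *
+  * so the watchdog armed on state entry bounds the total wait while the
+  * callback sets the polling cadence inside that bound.
+  */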
{ + /* continue pairing info polling */ + callback_in_ms(20, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->pairing_info_read != PASS || + input->h_prime_validation != PASS) { + /* 1A-11-1: consider invalid h' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); + break; + case H2_A1_SEND_STORED_KM: + if (input->stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A1_VALIDATE_H_PRIME); + break; + case H2_A1_VALIDATE_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1A-11-2: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue h' polling */ + callback_in_ms(20, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->h_prime_validation != PASS) { + /* 1A-11-1: consider invalid h' a failure */ + adjust->hdcp2.force_no_stored_km = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); + break; + case H2_A2_LOCALITY_CHECK: + if (hdcp->state.stay_count > 10 || + input->lc_init_prepare != PASS || + input->lc_init_write != PASS || + input->l_prime_available_poll != PASS || + input->l_prime_read != PASS) { + /* + * 1A-05: consider disconnection after LC init a failure + * 1A-13-1: consider invalid l' a failure + * 1A-13-2: consider l' timeout a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->l_prime_validation != PASS) { + callback_in_ms(0, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); + break; + case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + if (input->eks_prepare != PASS || + input->eks_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 3000, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A6_WAIT_FOR_RX_ID_LIST); + } else { + /* some CTS equipment requires a delay GREATER than + * 200 ms, so delay 210 ms instead of 200 ms + */ + callback_in_ms(210, output); + set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION); + } + break; + case H2_ENABLE_ENCRYPTION: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + /* + * 1A-07: restart hdcp on REAUTH_REQ + * 1B-08: restart hdcp on REAUTH_REQ + */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->enable_encryption != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A5_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + case H2_A5_AUTHENTICATED: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); 
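+ /* [Editor's note, not part of the original patch.] increment_stay_counter()
+  * is the generic retry budget: re-entering a state without a transition
+  * bumps hdcp->state.stay_count, and gates such as "stay_count > 10" in
+  * H2_A2_LOCALITY_CHECK turn an endless L'-mismatch loop into a hard
+  * failure. The spec permits up to 1024 locality-check attempts; this
+  * implementation gives up far sooner. The counter appears to be scoped per
+  * state (reset on set_state_id()), though that is inferred, not shown in
+  * this hunk.
+  */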
+ break; + } + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + case H2_A6_WAIT_FOR_RX_ID_LIST: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!event_ctx->rx_id_list_ready) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-02: consider rx id list timeout a failure */ + /* some CTS equipment's actual timeout + * measurement is slightly greater than 3000 ms. + * Delay 100 ms to ensure it is fully timeout + * before re-authentication. + */ + fail_and_restart_in_ms(100, &status, output); + } else { + callback_in_ms(300, output); + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->rx_id_list_read != PASS || + input->device_count_check != PASS || + input->rx_id_list_validation != PASS || + input->repeater_auth_ack_write != PASS) { + /* 1B-03: consider invalid v' a failure + * 1B-04: consider MAX_DEVS_EXCEEDED a failure + * 1B-05: consider MAX_CASCADE_EXCEEDED a failure + * 1B-06: consider invalid seq_num_V a failure + * 1B-09: consider seq_num_V rollover a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); + break; + case H2_A9_SEND_STREAM_MANAGEMENT: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->prepare_stream_manage != PASS || + input->stream_manage_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_VALIDATE_STREAM_READY); + break; + case H2_A9_VALIDATE_STREAM_READY: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->stream_ready_available != PASS) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-10-2: restart content stream management on + * stream ready timeout + */ + hdcp->auth.count.stream_management_retry_count++; + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); + } else { + callback_in_ms(10, output); + increment_stay_counter(hdcp); + } + break; + } else if (input->stream_ready_read != PASS || + input->stream_ready_validation != PASS) { + /* + * 1B-10-1: restart content stream management + * on invalid M' + */ + if (hdcp->auth.count.stream_management_retry_count > 10) { + fail_and_restart_in_ms(0, &status, output); + } else { + hdcp->auth.count.stream_management_retry_count++; + callback_in_ms(0, output); + set_state_id(hdcp, output, H2_A9_SEND_STREAM_MANAGEMENT); + } + break; + } + callback_in_ms(200, output); + set_state_id(hdcp, output, H2_ENABLE_ENCRYPTION); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + 
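+ /* [Editor's note, not part of the original patch.] Stream management (the
+  * A9 states) is the one sub-protocol that retries in place rather than
+  * restarting authentication: an invalid M' or a stream-ready timeout loops
+  * back to SEND_STREAM_MANAGEMENT, bounded by
+  * stream_management_retry_count > 10. Each retry must carry a fresh
+  * seq_num_M, which is why the loop re-enters the prepare step instead of
+  * rewriting the previous message.
+  */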
break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp2 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case D2_A0_DETERMINE_RX_HDCP_CAPABLE: + if (input->rx_caps_read_dp != PASS || + input->hdcp2_capable_check != PASS) { + adjust->hdcp2.disable = 1; + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A1_SEND_AKE_INIT); + } + break; + case D2_A1_SEND_AKE_INIT: + if (input->add_topology != PASS || + input->create_session != PASS || + input->ake_init_prepare != PASS) { + /* out of sync with psp state */ + adjust->hdcp2.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ake_init_write != PASS) { + /* possibly display not ready */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(100, output); + set_state_id(hdcp, output, D2_A1_VALIDATE_AKE_CERT); + break; + case D2_A1_VALIDATE_AKE_CERT: + if (input->ake_cert_read != PASS || + input->ake_cert_validation != PASS) { + /* + * 1A-08: consider invalid ake cert a failure + * 1A-09: consider receiver id listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_km_stored && + !adjust->hdcp2.force_no_stored_km) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A1_SEND_STORED_KM); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A1_SEND_NO_STORED_KM); + } + break; + case D2_A1_SEND_NO_STORED_KM: + if (input->no_stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (adjust->hdcp2.increase_h_prime_timeout) + set_watchdog_in_ms(hdcp, 2000, output); + else + set_watchdog_in_ms(hdcp, 1000, output); + set_state_id(hdcp, output, D2_A1_READ_H_PRIME); + break; + case D2_A1_READ_H_PRIME: + if (input->h_prime_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* 1A-10-3: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + set_state_id(hdcp, output, D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME); + break; + case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME: + if (input->pairing_available != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* + * 1A-11: consider pairing info timeout + * a failure + */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } else if (input->pairing_info_read != PASS || + input->h_prime_validation != PASS) { + /* 1A-10-1: consider invalid h' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); + break; + case D2_A1_SEND_STORED_KM: + if (input->stored_km_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 200, output); + set_state_id(hdcp, output, D2_A1_VALIDATE_H_PRIME); + break; + case D2_A1_VALIDATE_H_PRIME: + if (input->h_prime_available != PASS) { 
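+ /* [Editor's note, not part of the original patch.] Compare this DP table
+  * with the HDMI one above: the DP wait arms rarely schedule a callback
+  * next to increment_stay_counter(), because a DP receiver announces H',
+  * pairing info and READY through CP_IRQ rather than being polled. The stay
+  * counter still increments so the watchdog bound holds, but otherwise the
+  * machine sleeps until the sink interrupts.
+  */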
+ if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* 1A-10-2: consider h' timeout a failure */ + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } else if (input->h_prime_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->h_prime_validation != PASS) { + /* 1A-10-1: consider invalid h' a failure */ + adjust->hdcp2.force_no_stored_km = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); + break; + case D2_A2_LOCALITY_CHECK: + if (hdcp->state.stay_count > 10 || + input->lc_init_prepare != PASS || + input->lc_init_write != PASS || + input->l_prime_read != PASS) { + /* 1A-12: consider invalid l' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->l_prime_validation != PASS) { + callback_in_ms(0, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); + break; + case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: + if (input->eks_prepare != PASS || + input->eks_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 3000, output); + set_state_id(hdcp, output, D2_A6_WAIT_FOR_RX_ID_LIST); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_SEND_CONTENT_STREAM_TYPE); + } + break; + case D2_SEND_CONTENT_STREAM_TYPE: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS || + input->content_stream_type_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(210, output); + set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION); + break; + case D2_ENABLE_ENCRYPTION: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + /* + * 1A-07: restart hdcp on REAUTH_REQ + * 1B-08: restart hdcp on REAUTH_REQ + */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->enable_encryption != PASS || + (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_state_id(hdcp, output, D2_A5_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + case D2_A5_AUTHENTICATED: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->link_integrity_check_dp != PASS) { + if (hdcp->connection.hdcp2_retry_count >= 1) + adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready && conn->is_repeater) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } + increment_stay_counter(hdcp); + break; + case D2_A6_WAIT_FOR_RX_ID_LIST: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (!event_ctx->rx_id_list_ready) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + /* 1B-02: consider rx id list timeout a failure */ + 
fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS || + input->rx_id_list_read != PASS || + input->device_count_check != PASS || + input->rx_id_list_validation != PASS || + input->repeater_auth_ack_write != PASS) { + /* + * 1B-03: consider invalid v' a failure + * 1B-04: consider MAX_DEVS_EXCEEDED a failure + * 1B-05: consider MAX_CASCADE_EXCEEDED a failure + * 1B-06: consider invalid seq_num_V a failure + * 1B-09: consider seq_num_V rollover a failure + */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); + break; + case D2_A9_SEND_STREAM_MANAGEMENT: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->prepare_stream_manage != PASS || + input->stream_manage_write != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(100, output); + set_state_id(hdcp, output, D2_A9_VALIDATE_STREAM_READY); + break; + case D2_A9_VALIDATE_STREAM_READY: + if (input->rxstatus_read != PASS || + input->reauth_request_check != PASS || + input->link_integrity_check_dp != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (event_ctx->rx_id_list_ready) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK); + break; + } else if (input->stream_ready_read != PASS || + input->stream_ready_validation != PASS) { + /* + * 1B-10-1: restart content stream management + * on invalid M' + * 1B-10-2: consider stream ready timeout a failure + */ + if (hdcp->auth.count.stream_management_retry_count > 10) { + fail_and_restart_in_ms(0, &status, output); + } else { + hdcp->auth.count.stream_management_retry_count++; + callback_in_ms(0, output); + set_state_id(hdcp, output, D2_A9_SEND_STREAM_MANAGEMENT); + } + break; + } + callback_in_ms(200, output); + set_state_id(hdcp, output, D2_ENABLE_ENCRYPTION); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c new file mode 100644 index 000000000000..ff9d54812e62 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -0,0 +1,631 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/ +#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */ +#define HDCP_MAX_AUX_TRANSACTION_SIZE 16 + +enum mod_hdcp_ddc_message_id { + MOD_HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + MOD_HDCP_MESSAGE_ID_READ_BKSV = 0, + MOD_HDCP_MESSAGE_ID_READ_RI_R0, + MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + MOD_HDCP_MESSAGE_ID_WRITE_AN, + MOD_HDCP_MESSAGE_ID_READ_VH_X, + MOD_HDCP_MESSAGE_ID_READ_VH_0, + MOD_HDCP_MESSAGE_ID_READ_VH_1, + MOD_HDCP_MESSAGE_ID_READ_VH_2, + MOD_HDCP_MESSAGE_ID_READ_VH_3, + MOD_HDCP_MESSAGE_ID_READ_VH_4, + MOD_HDCP_MESSAGE_ID_READ_BCAPS, + MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + MOD_HDCP_MESSAGE_ID_READ_BINFO, + + /* HDCP 2.2 */ + + MOD_HDCP_MESSAGE_ID_HDCP2VERSION, + MOD_HDCP_MESSAGE_ID_RX_CAPS, + MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, + MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, + MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, + MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + + MOD_HDCP_MESSAGE_ID_MAX +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, + [MOD_HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, + 
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70, + [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0 +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, + [MOD_HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, + [MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, + [MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, + [MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, + [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, + [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, + [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, + [MOD_HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, + [MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494 +}; + +static enum mod_hdcp_status read(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + success = hdcp->config.ddc.funcs.read_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp_i2c_offsets[msg_id], + buf, + (uint32_t)buf_len); + } + + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len, + uint8_t read_size) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + while (buf_len > 0) { + cur_size = MIN(buf_len, read_size); + status = read(hdcp, msg_id, buf + data_offset, cur_size); + + if (status != MOD_HDCP_STATUS_SUCCESS) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + + return status; +} + +static enum mod_hdcp_status write(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.write_dpcd( + hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + hdcp->buf[0] = hdcp_i2c_offsets[msg_id]; + memmove(&hdcp->buf[1], buf, buf_len); + success = hdcp->config.ddc.funcs.write_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp->buf, + (uint32_t)(buf_len+1)); + } + + return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV, + hdcp->auth.msg.hdcp1.bksv, + sizeof(hdcp->auth.msg.hdcp1.bksv)); +} + +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS, + &hdcp->auth.msg.hdcp1.bcaps, + sizeof(hdcp->auth.msg.hdcp1.bcaps)); +} + +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + 1); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + sizeof(hdcp->auth.msg.hdcp1.bstatus)); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0, + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, + sizeof(hdcp->auth.msg.hdcp1.r0p)); +} + +/* special case, reading repeatedly at the same address, don't use read() */ +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size, + KSV_READ_SIZE); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0, + &hdcp->auth.msg.hdcp1.vp[0], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1, + &hdcp->auth.msg.hdcp1.vp[4], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2, + &hdcp->auth.msg.hdcp1.vp[8], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3, 
+ &hdcp->auth.msg.hdcp1.vp[12], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4, + &hdcp->auth.msg.hdcp1.vp[16], 4); +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); + else + status = MOD_HDCP_STATUS_INVALID_OPERATION; + + return status; +} + +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + hdcp->auth.msg.hdcp1.aksv, + sizeof(hdcp->auth.msg.hdcp1.aksv)); +} + +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + &hdcp->auth.msg.hdcp1.ainfo, + sizeof(hdcp->auth.msg.hdcp1.ainfo)); +} + +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN, + hdcp->auth.msg.hdcp1.an, + sizeof(hdcp->auth.msg.hdcp1.an)); +} + +enum mod_hdcp_status mod_hdcp_read_hdcp2version(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = MOD_HDCP_STATUS_INVALID_OPERATION; + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_HDCP2VERSION, + &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, + sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); + + return status; +} + +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (!is_dp_hdcp(hdcp)) + status = MOD_HDCP_STATUS_INVALID_OPERATION; + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_RX_CAPS, + hdcp->auth.msg.hdcp2.rxcaps_dp, + sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); + + return status; +} + +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, + &hdcp->auth.msg.hdcp2.rxstatus_dp, + 1); + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RXSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, + sizeof(hdcp->auth.msg.hdcp2.rxstatus)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.ake_cert[0] = 3; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + hdcp->auth.msg.hdcp2.ake_cert+1, + sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + hdcp->auth.msg.hdcp2.ake_cert, + sizeof(hdcp->auth.msg.hdcp2.ake_cert)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + hdcp->auth.msg.hdcp2.ake_h_prime+1, + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + hdcp->auth.msg.hdcp2.ake_h_prime, + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + hdcp->auth.msg.hdcp2.ake_pairing_info+1, + 
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + hdcp->auth.msg.hdcp2.ake_pairing_info, + sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + hdcp->auth.msg.hdcp2.lc_l_prime+1, + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + hdcp->auth.msg.hdcp2.lc_l_prime, + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.rx_id_list[0] = 12; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + hdcp->auth.msg.hdcp2.rx_id_list+1, + sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + hdcp->auth.msg.hdcp2.rx_id_list, + hdcp->auth.msg.hdcp2.rx_id_list_size); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) { + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17; + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1); + + } else { + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); + } + return status; +} + +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, + hdcp->auth.msg.hdcp2.ake_init+1, + sizeof(hdcp->auth.msg.hdcp2.ake_init)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_INIT, + hdcp->auth.msg.hdcp2.ake_init, + sizeof(hdcp->auth.msg.hdcp2.ake_init)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + hdcp->auth.msg.hdcp2.ake_no_stored_km+1, + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + hdcp->auth.msg.hdcp2.ake_no_stored_km, + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + hdcp->auth.msg.hdcp2.ake_stored_km+1, + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + hdcp->auth.msg.hdcp2.ake_stored_km, + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, + hdcp->auth.msg.hdcp2.lc_init+1, + sizeof(hdcp->auth.msg.hdcp2.lc_init)-1); + else + status = 
write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT, + hdcp->auth.msg.hdcp2.lc_init, + sizeof(hdcp->auth.msg.hdcp2.lc_init)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + hdcp->auth.msg.hdcp2.ske_eks+1, + sizeof(hdcp->auth.msg.hdcp2.ske_eks)-1); + else + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + hdcp->auth.msg.hdcp2.ske_eks, + sizeof(hdcp->auth.msg.hdcp2.ske_eks)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + hdcp->auth.msg.hdcp2.repeater_auth_ack+1, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)-1); + else + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + hdcp->auth.msg.hdcp2.repeater_auth_ack, + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + hdcp->auth.msg.hdcp2.repeater_auth_stream_manage+1, + hdcp->auth.msg.hdcp2.stream_manage_size-1); + else + status = write(hdcp, + MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, + hdcp->auth.msg.hdcp2.stream_manage_size); + return status; +} + +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + hdcp->auth.msg.hdcp2.content_stream_type_dp+1, + sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)-1); + else + status = MOD_HDCP_STATUS_INVALID_OPERATION; + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c new file mode 100644 index 000000000000..724ebcee9a19 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -0,0 +1,281 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#include "hdcp.h" + +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size) +{ + const uint8_t bytes_per_line = 16, + byte_size = 3, + newline_size = 1, + terminator_size = 1; + uint32_t line_count = msg_size / bytes_per_line, + trailing_bytes = msg_size % bytes_per_line; + uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count + + byte_size * trailing_bytes + newline_size + terminator_size; + uint32_t buf_pos = 0; + uint32_t i = 0; + + if (buf_size >= target_size) { + for (i = 0; i < msg_size; i++) { + if (i % bytes_per_line == 0) + buf[buf_pos++] = '\n'; + sprintf(&buf[buf_pos], "%02X ", msg[i]); + buf_pos += byte_size; + } + buf[buf_pos++] = '\0'; + } +} + +char *mod_hdcp_status_to_str(int32_t status) +{ + switch (status) { + case MOD_HDCP_STATUS_SUCCESS: + return "MOD_HDCP_STATUS_SUCCESS"; + case MOD_HDCP_STATUS_FAILURE: + return "MOD_HDCP_STATUS_FAILURE"; + case MOD_HDCP_STATUS_RESET_NEEDED: + return "MOD_HDCP_STATUS_RESET_NEEDED"; + case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND: + return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND"; + case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND: + return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND"; + case MOD_HDCP_STATUS_INVALID_STATE: + return "MOD_HDCP_STATUS_INVALID_STATE"; + case MOD_HDCP_STATUS_NOT_IMPLEMENTED: + return "MOD_HDCP_STATUS_NOT_IMPLEMENTED"; + case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE: + return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE"; + case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE: + return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE"; + case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER: + return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER"; + case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE: + return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE"; + case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING: + return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: + return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED: + return 
"MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED"; + case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV: + return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV"; + case MOD_HDCP_STATUS_DDC_FAILURE: + return "MOD_HDCP_STATUS_DDC_FAILURE"; + case MOD_HDCP_STATUS_INVALID_OPERATION: + return "MOD_HDCP_STATUS_INVALID_OPERATION"; + case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE: + return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE"; + case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING: + return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING"; + case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING: + return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING: + return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED: + return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING: + return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED: + return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED"; + case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY: + return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY"; + case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION: + return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING: + return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING"; + case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST: + return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST"; + case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE"; + case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE: + return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE"; + default: + return "MOD_HDCP_STATUS_UNKNOWN"; + } +} + +char *mod_hdcp_state_id_to_str(int32_t id) +{ + switch (id) { + case HDCP_UNINITIALIZED: + return "HDCP_UNINITIALIZED"; + case HDCP_INITIALIZED: + return "HDCP_INITIALIZED"; + case HDCP_CP_NOT_DESIRED: + return "HDCP_CP_NOT_DESIRED"; + 
case H1_A0_WAIT_FOR_ACTIVE_RX:
+ return "H1_A0_WAIT_FOR_ACTIVE_RX";
+ case H1_A1_EXCHANGE_KSVS:
+ return "H1_A1_EXCHANGE_KSVS";
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
+ case H1_A45_AUTHENTICATED:
+ return "H1_A45_AUTHENTICATED";
+ case H1_A8_WAIT_FOR_READY:
+ return "H1_A8_WAIT_FOR_READY";
+ case H1_A9_READ_KSV_LIST:
+ return "H1_A9_READ_KSV_LIST";
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D1_A1_EXCHANGE_KSVS:
+ return "D1_A1_EXCHANGE_KSVS";
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ return "D1_A23_WAIT_FOR_R0_PRIME";
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
+ case D1_A4_AUTHENTICATED:
+ return "D1_A4_AUTHENTICATED";
+ case D1_A6_WAIT_FOR_READY:
+ return "D1_A6_WAIT_FOR_READY";
+ case D1_A7_READ_KSV_LIST:
+ return "D1_A7_READ_KSV_LIST";
+ case H2_A0_KNOWN_HDCP2_CAPABLE_RX:
+ return "H2_A0_KNOWN_HDCP2_CAPABLE_RX";
+ case H2_A1_SEND_AKE_INIT:
+ return "H2_A1_SEND_AKE_INIT";
+ case H2_A1_VALIDATE_AKE_CERT:
+ return "H2_A1_VALIDATE_AKE_CERT";
+ case H2_A1_SEND_NO_STORED_KM:
+ return "H2_A1_SEND_NO_STORED_KM";
+ case H2_A1_READ_H_PRIME:
+ return "H2_A1_READ_H_PRIME";
+ case H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ return "H2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+ case H2_A1_SEND_STORED_KM:
+ return "H2_A1_SEND_STORED_KM";
+ case H2_A1_VALIDATE_H_PRIME:
+ return "H2_A1_VALIDATE_H_PRIME";
+ case H2_A2_LOCALITY_CHECK:
+ return "H2_A2_LOCALITY_CHECK";
+ case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ return "H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+ case H2_ENABLE_ENCRYPTION:
+ return "H2_ENABLE_ENCRYPTION";
+ case H2_A5_AUTHENTICATED:
+ return "H2_A5_AUTHENTICATED";
+ case H2_A6_WAIT_FOR_RX_ID_LIST:
+ return "H2_A6_WAIT_FOR_RX_ID_LIST";
+ case H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ return "H2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+ case H2_A9_SEND_STREAM_MANAGEMENT:
+ return "H2_A9_SEND_STREAM_MANAGEMENT";
+ case H2_A9_VALIDATE_STREAM_READY:
+ return "H2_A9_VALIDATE_STREAM_READY";
+ case D2_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D2_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D2_A1_SEND_AKE_INIT:
+ return "D2_A1_SEND_AKE_INIT";
+ case D2_A1_VALIDATE_AKE_CERT:
+ return "D2_A1_VALIDATE_AKE_CERT";
+ case D2_A1_SEND_NO_STORED_KM:
+ return "D2_A1_SEND_NO_STORED_KM";
+ case D2_A1_READ_H_PRIME:
+ return "D2_A1_READ_H_PRIME";
+ case D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME:
+ return "D2_A1_READ_PAIRING_INFO_AND_VALIDATE_H_PRIME";
+ case D2_A1_SEND_STORED_KM:
+ return "D2_A1_SEND_STORED_KM";
+ case D2_A1_VALIDATE_H_PRIME:
+ return "D2_A1_VALIDATE_H_PRIME";
+ case D2_A2_LOCALITY_CHECK:
+ return "D2_A2_LOCALITY_CHECK";
+ case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
+ return "D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER";
+ case D2_SEND_CONTENT_STREAM_TYPE:
+ return "D2_SEND_CONTENT_STREAM_TYPE";
+ case D2_ENABLE_ENCRYPTION:
+ return "D2_ENABLE_ENCRYPTION";
+ case D2_A5_AUTHENTICATED:
+ return "D2_A5_AUTHENTICATED";
+ case D2_A6_WAIT_FOR_RX_ID_LIST:
+ return "D2_A6_WAIT_FOR_RX_ID_LIST";
+ case D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK:
+ return "D2_A78_VERIFY_RX_ID_LIST_AND_SEND_ACK";
+ case D2_A9_SEND_STREAM_MANAGEMENT:
+ return "D2_A9_SEND_STREAM_MANAGEMENT";
+ case D2_A9_VALIDATE_STREAM_READY:
+ return "D2_A9_VALIDATE_STREAM_READY";
+ default:
+ return "UNKNOWN_STATE_ID";
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h 
b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h new file mode 100644 index 000000000000..ff91373ebada --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -0,0 +1,193 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_LOG_H_ +#define MOD_HDCP_LOG_H_ + +#ifdef CONFIG_DRM_AMD_DC_HDCP +#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__) +#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) +#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#endif + +/* default logs */ +#define HDCP_ERROR_TRACE(hdcp, status) \ + HDCP_LOG_ERR(hdcp, \ + "[Link %d] WARNING %s IN STATE %s", \ + hdcp->config.index, \ + mod_hdcp_status_to_str(status), \ + mod_hdcp_state_id_to_str(hdcp->state.id)) +#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 1.4 enabled on display %d", \ + hdcp->config.index, displayIndex) +#define HDCP_HDCP2_ENABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 2.2 enabled on display %d", \ + hdcp->config.index, displayIndex) +/* state machine logs */ +#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] HDCP_REMOVE_DISPLAY index %d", \ + hdcp->config.index, displayIndex) +#define HDCP_INPUT_PASS_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tPASS %s", \ + hdcp->config.index, str) +#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tFAIL %s", \ + hdcp->config.index, str) +#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \ + if (output->watchdog_timer_needed) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s with %d ms watchdog", \ + hdcp->config.index, \ + mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \ + else \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s", hdcp->config.index, \ + mod_hdcp_state_id_to_str(id)); \ +} while (0) +#define HDCP_TIMEOUT_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index) +#define HDCP_CPIRQ_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) +#define HDCP_EVENT_TRACE(hdcp, event) \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp) +/* TODO: find some way to 
tell if logging is off to save time */ +#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \ + msg_name, hdcp->buf); \ +} while (0) +#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \ + hdcp->config.index, msg_name,\ + hdcp->buf); \ +} while (0) +#define HDCP_FULL_DDC_TRACE(hdcp) do { \ + if (is_hdcp1(hdcp)) { \ + HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ + sizeof(hdcp->auth.msg.hdcp1.bksv)); \ + HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ + sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ + sizeof(hdcp->auth.msg.hdcp1.an)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ + sizeof(hdcp->auth.msg.hdcp1.aksv)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ + sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ + HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ + sizeof(hdcp->auth.msg.hdcp1.r0p)); \ + HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ + HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ + hdcp->auth.msg.hdcp1.ksvlist_size); \ + HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ + sizeof(hdcp->auth.msg.hdcp1.vp)); \ + } else { \ + HDCP_DDC_READ_TRACE(hdcp, "HDCP2Version", \ + &hdcp->auth.msg.hdcp2.hdcp2version_hdmi, \ + sizeof(hdcp->auth.msg.hdcp2.hdcp2version_hdmi)); \ + HDCP_DDC_READ_TRACE(hdcp, "Rx Caps", hdcp->auth.msg.hdcp2.rxcaps_dp, \ + sizeof(hdcp->auth.msg.hdcp2.rxcaps_dp)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKE Init", hdcp->auth.msg.hdcp2.ake_init, \ + sizeof(hdcp->auth.msg.hdcp2.ake_init)); \ + HDCP_DDC_READ_TRACE(hdcp, "AKE Cert", hdcp->auth.msg.hdcp2.ake_cert, \ + sizeof(hdcp->auth.msg.hdcp2.ake_cert)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Stored KM", \ + hdcp->auth.msg.hdcp2.ake_stored_km, \ + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "No Stored KM", \ + hdcp->auth.msg.hdcp2.ake_no_stored_km, \ + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); \ + HDCP_DDC_READ_TRACE(hdcp, "H'", hdcp->auth.msg.hdcp2.ake_h_prime, \ + sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)); \ + HDCP_DDC_READ_TRACE(hdcp, "Pairing Info", \ + hdcp->auth.msg.hdcp2.ake_pairing_info, \ + sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "LC Init", hdcp->auth.msg.hdcp2.lc_init, \ + sizeof(hdcp->auth.msg.hdcp2.lc_init)); \ + HDCP_DDC_READ_TRACE(hdcp, "L'", hdcp->auth.msg.hdcp2.lc_l_prime, \ + sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Exchange KS", hdcp->auth.msg.hdcp2.ske_eks, \ + sizeof(hdcp->auth.msg.hdcp2.ske_eks)); \ + HDCP_DDC_READ_TRACE(hdcp, "Rx Status", \ + (uint8_t *)&hdcp->auth.msg.hdcp2.rxstatus, \ + sizeof(hdcp->auth.msg.hdcp2.rxstatus)); \ + HDCP_DDC_READ_TRACE(hdcp, "Rx Id List", \ + hdcp->auth.msg.hdcp2.rx_id_list, \ + hdcp->auth.msg.hdcp2.rx_id_list_size); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Rx Id List Ack", \ + hdcp->auth.msg.hdcp2.repeater_auth_ack, \ + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Management", \ + hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, \ + 
hdcp->auth.msg.hdcp2.stream_manage_size); \ + HDCP_DDC_READ_TRACE(hdcp, "Stream Ready", \ + hdcp->auth.msg.hdcp2.repeater_auth_stream_ready, \ + sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "Content Stream Type", \ + hdcp->auth.msg.hdcp2.content_stream_type_dp, \ + sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp)); \ + } \ +} while (0) +#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \ + hdcp->config.index) +#define HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp2 session", \ + hdcp->config.index) +#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index) +#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index) +#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \ +} while (0) +#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ +} while (0) + +#endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c new file mode 100644 index 000000000000..ef4eb55f4474 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -0,0 +1,831 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#define MAX_NUM_DISPLAYS 24 + + +#include "hdcp.h" + +#include "amdgpu.h" +#include "hdcp_psp.h" + +static void hdcp2_message_init(struct mod_hdcp *hdcp, + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *in) +{ + in->session_handle = hdcp->auth.id; + in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg1_desc.msg_size = 0; + in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg2_desc.msg_size = 0; + in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE; + in->process.msg3_desc.msg_size = 0; +} +enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + uint8_t i; + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (is_display_added(&(hdcp->connection.displays[i]))) { + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + display = &hdcp->connection.displays[i]; + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE; + HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + struct mod_hdcp_link *link = &hdcp->connection.link; + uint8_t i; + + if (!psp->dtm_context.dtm_initialized) { + DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) { + display = &hdcp->connection.displays[i]; + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1; + dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller; + dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line; + dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be; + dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; + dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; + dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); + 
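+ /*
+ * The inverse walk in mod_hdcp_remove_display_topology()
+ * marks the handle inactive again and drops the display
+ * back to MOD_HDCP_DISPLAY_ACTIVE.
+ */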
} + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct ta_hdcp_shared_memory *hdcp_cmd; + + if (!psp->hdcp_context.hdcp_initialized) { + DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; + + hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; + hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; + memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, + sizeof(hdcp->auth.msg.hdcp1.aksv)); + memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary, + sizeof(hdcp->auth.msg.hdcp1.an)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; + + HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id; + + memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv, + TA_HDCP__HDCP1_KSV_SIZE); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p; + hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) { + /* needs second part of authentication */ + hdcp->connection.is_repeater = 1; + } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) { + hdcp->connection.is_repeater = 0; + } else + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp 
*hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION; + + if (!is_dp_mst_hdcp(hdcp)) { + display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index); + } + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id; + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size; + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp, + sizeof(hdcp->auth.msg.hdcp1.vp)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo = + is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + int i = 0; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->connection.displays[i].adjust.disable) + continue; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; + + hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = 
hdcp->auth.id; + + hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; + + return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status) +{ + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS) + return MOD_HDCP_STATUS_FAILURE; + + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + + if (!psp->hdcp_context.hdcp_initialized) { + DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized"); + return MOD_HDCP_STATUS_FAILURE; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; + + if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) + hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0; + else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_1) + hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1; + else if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_MAX) + hdcp_cmd->in_msg.hdcp2_create_session_v2.negotiate_content_type = + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE; + + hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp2_destroy_session.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE; + + HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct 
ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_INIT; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE; + + memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ake_init)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2; + msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2; + + hdcp2_message_init(hdcp, msg_in); + + msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT; + msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT; + + memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_cert, + sizeof(hdcp->auth.msg.hdcp2.ake_cert)); + + msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM; + msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM; + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; + + memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0], + sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)); + + memcpy(hdcp->auth.msg.hdcp2.ake_stored_km, + &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)], + sizeof(hdcp->auth.msg.hdcp2.ake_stored_km)); + + if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) { + hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0; + hdcp->connection.is_repeater = msg_out->process.is_repeater ? 
1 : 0;
+		return MOD_HDCP_STATUS_SUCCESS;
+	}
+
+	return MOD_HDCP_STATUS_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME;
+	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME;
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.ake_h_prime,
+	       sizeof(hdcp->auth.msg.hdcp2.ake_h_prime));
+
+	if (!hdcp->connection.is_km_stored) {
+		msg_in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO;
+		msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO;
+		memcpy(&msg_in->process.receiver_message[sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)],
+		       hdcp->auth.msg.hdcp2.ake_pairing_info, sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info));
+	}
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+
+	if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+	else if (!hdcp->connection.is_km_stored &&
+		 msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
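[Editorial note: whether AKE_Send_Pairing_Info accompanies H' depends on the key-management path the TA chose during certificate validation. A hedged sketch of the expected caller-side branch; the wrapper and the elided DDC reads are illustrative only, not part of this patch.]

/* Illustrative sketch only; not part of this patch. */
static enum mod_hdcp_status hdcp2_read_h_prime_sketch(struct mod_hdcp *hdcp)
{
	/*
	 * After mod_hdcp_hdcp2_validate_ake_cert() the TA reports whether a
	 * master key was already paired (connection.is_km_stored). That flag
	 * decides which AKE message the DM sent, and therefore whether
	 * AKE_Send_Pairing_Info follows AKE_Send_H_prime on the wire.
	 */
	if (hdcp->connection.is_km_stored) {
		/* AKE_Stored_km path: only H' is read over DDC (elided). */
	} else {
		/*
		 * AKE_No_Stored_km path: the DM must also read pairing info
		 * into hdcp->auth.msg.hdcp2.ake_pairing_info before calling
		 * the helper, which forwards both buffers to the TA in a
		 * single shared-memory command.
		 */
	}
	return mod_hdcp_hdcp2_validate_h_prime(hdcp);
}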
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__LC_INIT;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.lc_init));
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME;
+	msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME;
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.lc_l_prime,
+	       sizeof(hdcp->auth.msg.hdcp2.lc_l_prime));
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+	if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS;
+
+	if (is_dp_hdcp(hdcp))
+		msg_in->prepare.msg2_id = TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+	msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+	if (is_dp_hdcp(hdcp)) {
+		memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+		       &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+		       sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+	}
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+
+	if (!is_dp_mst_hdcp(hdcp)) {
+		display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+		HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
+	}
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST;
+	msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.rx_id_list,
+	       sizeof(hdcp->auth.msg.hdcp2.rx_id_list));
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+
+	memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+	if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+		return MOD_HDCP_STATUS_SUCCESS;
+	}
+
+	return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	uint8_t i;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+		    hdcp->connection.displays[i].adjust.disable)
+			continue;
+
+		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+
+		hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
+		psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+		if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+			break;
+
+		hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+		HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+	}
+
+	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
+								  : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+}
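[Editorial note: every helper in this file shares one invocation pattern against the PSP shared buffer. The distilled sketch below is hypothetical, not part of the patch; only the session-handle field is filled, standing in for whichever in_msg member a real command uses. It shows the four steps: clear, fill, invoke, check.]

/* Illustrative sketch only; not part of this patch. */
static enum ta_hdcp_status hdcp_ta_command_sketch(struct psp_context *psp,
						  uint32_t cmd_id,
						  uint32_t session_handle)
{
	struct ta_hdcp_shared_memory *hdcp_cmd =
		(struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;

	/* 1. the buffer is reused for every command, so clear it first */
	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
	/* 2. fill the in_msg union member that matches cmd_id */
	hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = session_handle;
	hdcp_cmd->cmd_id = cmd_id;
	/* 3. trap into the trusted application ... */
	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
	/* 4. ... and read back the status the TA wrote */
	return hdcp_cmd->hdcp_status;
}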
+
+enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->prepare.msg1_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE;
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+
+	hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
+
+	memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
+	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
+	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
+	msg_out = &hdcp_cmd->out_msg.hdcp2_prepare_process_authentication_message_v2;
+
+	hdcp2_message_init(hdcp, msg_in);
+
+	msg_in->process.msg1_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY;
+
+	msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready);
+
+	memcpy(&msg_in->process.receiver_message[0], hdcp->auth.msg.hdcp2.repeater_auth_stream_ready,
+	       sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready));
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
+		       (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+		   ?
MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id; + hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS; + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_FAILURE; + + if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) { + if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1) + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON; + else + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON; + } + + return MOD_HDCP_STATUS_SUCCESS; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h new file mode 100644 index 000000000000..82a5e997d573 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -0,0 +1,466 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MODULES_HDCP_HDCP_PSP_H_ +#define MODULES_HDCP_HDCP_PSP_H_ + +/* + * NOTE: These parameters are a one-to-one copy of the + * parameters required by PSP + */ +enum bgd_security_hdcp_encryption_level { + HDCP_ENCRYPTION_LEVEL__INVALID = 0, + HDCP_ENCRYPTION_LEVEL__OFF, + HDCP_ENCRYPTION_LEVEL__ON +}; + +enum bgd_security_hdcp2_content_type { + HDCP2_CONTENT_TYPE__INVALID = 0, + HDCP2_CONTENT_TYPE__TYPE0, + HDCP2_CONTENT_TYPE__TYPE1 +}; +enum ta_dtm_command { + TA_DTM_COMMAND__UNUSED_1 = 1, + TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2, + TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE +}; + +/* DTM related enumerations */ +/**********************************************************/ + +enum ta_dtm_status { + TA_DTM_STATUS__SUCCESS = 0x00, + TA_DTM_STATUS__GENERIC_FAILURE = 0x01, + TA_DTM_STATUS__INVALID_PARAMETER = 0x02, + TA_DTM_STATUS__NULL_POINTER = 0x3 +}; + +/* input/output structures for DTM commands */ +/**********************************************************/ +/** + * Input structures + */ +enum ta_dtm_hdcp_version_max_supported { + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23 +}; + +struct ta_dtm_topology_update_input_v2 { + /* display handle is unique across the driver and is used to identify a display */ + /* for all security interfaces which reference displays such as HDCP */ + uint32_t display_handle; + uint32_t is_active; + uint32_t is_miracast; + uint32_t controller; + uint32_t ddc_line; + uint32_t dig_be; + uint32_t dig_fe; + uint32_t dp_mst_vcid; + uint32_t is_assr; + uint32_t max_hdcp_supported_version; +}; + +struct ta_dtm_topology_assr_enable { + uint32_t display_topology_dig_be_index; +}; + +/** + * Output structures + */ + +/* No output structures yet */ + +union ta_dtm_cmd_input { + struct ta_dtm_topology_update_input_v2 topology_update_v2; + struct ta_dtm_topology_assr_enable topology_assr_enable; +}; + +union ta_dtm_cmd_output { + uint32_t reserved; +}; + +struct ta_dtm_shared_memory { + uint32_t cmd_id; + uint32_t resp_id; + enum ta_dtm_status dtm_status; + uint32_t reserved; + union ta_dtm_cmd_input dtm_in_message; + union ta_dtm_cmd_output dtm_out_message; +}; + +int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd, + uint64_t fence_mc_addr); + +enum ta_hdcp_command { + TA_HDCP_COMMAND__INITIALIZE, + TA_HDCP_COMMAND__HDCP1_CREATE_SESSION, + TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION, + TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS, + TA_HDCP_COMMAND__UNUSED_1, + TA_HDCP_COMMAND__HDCP2_DESTROY_SESSION, + TA_HDCP_COMMAND__UNUSED_2, + TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION, + TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS, + TA_HDCP_COMMAND__UNUSED_3, + TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2, + TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2, + TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION +}; + +enum ta_hdcp2_msg_id { + TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE = 1, + TA_HDCP_HDCP2_MSG_ID__AKE_INIT = 2, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_CERT = 3, + TA_HDCP_HDCP2_MSG_ID__AKE_NO_STORED_KM = 4, + TA_HDCP_HDCP2_MSG_ID__AKE_STORED_KM = 5, + 
TA_HDCP_HDCP2_MSG_ID__AKE_SEND_RRX = 6, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_H_PRIME = 7, + TA_HDCP_HDCP2_MSG_ID__AKE_SEND_PAIRING_INFO = 8, + TA_HDCP_HDCP2_MSG_ID__LC_INIT = 9, + TA_HDCP_HDCP2_MSG_ID__LC_SEND_L_PRIME = 10, + TA_HDCP_HDCP2_MSG_ID__SKE_SEND_EKS = 11, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_RECEIVERID_LIST = 12, + TA_HDCP_HDCP2_MSG_ID__RTT_READY = 13, + TA_HDCP_HDCP2_MSG_ID__RTT_CHALLENGE = 14, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_SEND_ACK = 15, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_MANAGE = 16, + TA_HDCP_HDCP2_MSG_ID__REPEATERAUTH_STREAM_READY = 17, + TA_HDCP_HDCP2_MSG_ID__RECEIVER_AUTH_STATUS = 18, + TA_HDCP_HDCP2_MSG_ID__AKE_TRANSMITTER_INFO = 19, + TA_HDCP_HDCP2_MSG_ID__AKE_RECEIVER_INFO = 20, + TA_HDCP_HDCP2_MSG_ID__SIGNAL_CONTENT_STREAM_TYPE_DP = 129 +}; + +enum ta_hdcp2_hdcp2_msg_id_max_size { + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__NULL_MESSAGE = 0, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_INIT = 12, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT = 534, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM = 129, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_RRX = 9, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO = 17, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_INIT = 9, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SKE_SEND_EKS = 25, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RECEIVERID_LIST = 181, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_READY = 1, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RTT_CHALLENGE = 17, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_SEND_RACK = 17, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_MANAGE = 13, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__REPEATERAUTH_STREAM_READY = 33, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__RECEIVER_AUTH_STATUS = 4, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_TRANSMITTER_INFO = 6, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO = 6, + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__SIGNAL_CONTENT_STREAM_TYPE_DP = 1 +}; + +/* HDCP related enumerations */ +/**********************************************************/ +#define TA_HDCP__INVALID_SESSION 0xFFFF +#define TA_HDCP__HDCP1_AN_SIZE 8 +#define TA_HDCP__HDCP1_KSV_SIZE 5 +#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 +#define TA_HDCP__HDCP1_V_PRIME_SIZE 20 +#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \ + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6 + +// 64 bits boundaries +#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \ + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4 + +enum ta_hdcp_status { + TA_HDCP_STATUS__SUCCESS = 0x00, + TA_HDCP_STATUS__GENERIC_FAILURE = 0x01, + TA_HDCP_STATUS__NULL_POINTER = 0x02, + TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03, + TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04, + TA_HDCP_STATUS__INVALID_PARAMETER = 0x05, + TA_HDCP_STATUS__VHX_ERROR = 0x06, + TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07, + TA_HDCP_STATUS__SRM_FAILURE = 0x08, + TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09, + TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A, + TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B, + TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C, + TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D, + TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E, + TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F, + TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10, + TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11, + TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12, + 
TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13, + TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14, + TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15, + TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16, + TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17 +}; + +enum ta_hdcp_authentication_status { + TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04, + TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06, + TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07, + TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09 +}; + +enum ta_hdcp2_msg_authentication_status { + TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS = 0, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__KM_NOT_AVAILABLE, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNUSED, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID = 100, // everything above does not fail the request + TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_ENOUGH_MEMORY, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__NOT_EXPECTED_MSG, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__SIGNATURE_CERTIFICAT_ERROR, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INCORRECT_HDCP_VERSION, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__UNKNOWN_MESSAGE, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_HMAC, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_TOPOLOGY, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH, + TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST +}; + +enum ta_hdcp_content_type { + TA_HDCP2_CONTENT_TYPE__TYPE0 = 1, + TA_HDCP2_CONTENT_TYPE__TYPE1, +}; + +enum ta_hdcp_content_type_negotiation_type { + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE0 = 1, + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__FORCE_TYPE1, + TA_HDCP2_CONTENT_TYPE_NEGOTIATION_TYPE__MAX_SUPPORTED +}; + +enum ta_hdcp2_version { + TA_HDCP2_VERSION_UNKNOWN = 0, + TA_HDCP2_VERSION_2_0 = 20, + TA_HDCP2_VERSION_2_1 = 21, + TA_HDCP2_VERSION_2_2 = 22 +}; + +/* input/output structures for HDCP commands */ +/**********************************************************/ +struct ta_hdcp_cmd_hdcp1_create_session_input { + uint8_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_create_session_output { + uint32_t session_handle; + uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_primary; + uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_destroy_session_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_input { + uint32_t session_handle; + uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bcaps; + uint16_t r0_prime_primary; + uint16_t r0_prime_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_input { + uint32_t session_handle; + uint16_t bstatus_binfo; + uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE]; + uint32_t ksv_list_size; + uint8_t pj_prime; + uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE]; +}; + 
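[Editorial note: the ksv_list field above is a fixed 127-entry, 5-bytes-per-entry buffer, so a caller has to clamp the repeater-provided device count before copying into it. A small illustrative helper for that bound; it is not part of this patch.]

/* Illustrative sketch only; not part of this patch. */
static inline uint32_t hdcp1_ksv_list_bytes(uint32_t device_count)
{
	uint32_t bytes = device_count * TA_HDCP__HDCP1_KSV_SIZE;
	uint32_t max = TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES * TA_HDCP__HDCP1_KSV_SIZE;

	/* never copy more than the TA-side buffer can hold */
	return bytes > max ? max : bytes;
}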
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_enable_encryption_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input { + uint32_t session_handle; + uint32_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_output { + uint32_t protection_level; +}; + +struct ta_hdcp_cmd_hdcp2_create_session_input_v2 { + uint32_t display_handle; + enum ta_hdcp_content_type_negotiation_type negotiate_content_type; +}; + +struct ta_hdcp_cmd_hdcp2_create_session_output_v2 { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_destroy_session_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_authentication_message_v2 { + enum ta_hdcp2_msg_id msg_id; + uint32_t msg_size; +}; + +struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 { + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg3_desc; + uint8_t receiver_message[TA_HDCP__HDCP2_RX_BUF_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 { + uint32_t hdcp_version; + uint32_t is_km_stored; + uint32_t is_locality_precompute_support; + uint32_t is_repeater; + enum ta_hdcp2_msg_authentication_status msg1_status; + enum ta_hdcp2_msg_authentication_status msg2_status; + enum ta_hdcp2_msg_authentication_status msg3_status; +}; + +struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 { + enum ta_hdcp2_msg_id msg1_id; + enum ta_hdcp2_msg_id msg2_id; +}; + +struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 { + enum ta_hdcp2_msg_authentication_status msg1_status; + enum ta_hdcp2_msg_authentication_status msg2_status; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg1_desc; + struct ta_hdcp_cmd_hdcp2_authentication_message_v2 msg2_desc; + uint8_t transmitter_message[TA_HDCP__HDCP2_TX_BUF_MAX_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 { + uint32_t session_handle; + struct ta_hdcp_cmd_hdcp2_process_authentication_message_input_v2 process; + struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_input_v2 prepare; +}; + +struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 { + uint32_t authentication_status; + struct ta_hdcp_cmd_hdcp2_process_authentication_message_output_v2 process; + struct ta_hdcp_cmd_hdcp2_prepare_authentication_message_output_v2 prepare; +}; + +struct ta_hdcp_cmd_hdcp2_set_encryption_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_get_encryption_status_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp2_get_encryption_status_output { + enum ta_hdcp_content_type hdcp2_type; + uint32_t protection_level; +}; + +struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input { + uint32_t session_handle; + uint32_t display_handle; +}; + +/**********************************************************/ +/* Common input structure for HDCP callbacks */ +union ta_hdcp_cmd_input { + struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_input 
hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption; + struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_destroy_session_input hdcp2_destroy_session; + struct ta_hdcp_cmd_hdcp2_set_encryption_input hdcp2_set_encryption; + struct ta_hdcp_cmd_hdcp2_get_encryption_status_input hdcp2_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_create_session_input_v2 hdcp2_create_session_v2; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 + hdcp2_prepare_process_authentication_message_v2; + struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption; +}; + +/* Common output structure for HDCP callbacks */ +union ta_hdcp_cmd_output { + struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_get_encryption_status_output hdcp2_get_encryption_status; + struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2; + struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 + hdcp2_prepare_process_authentication_message_v2; +}; +/**********************************************************/ + +struct ta_hdcp_shared_memory { + uint32_t cmd_id; + enum ta_hdcp_status hdcp_status; + uint32_t reserved; + union ta_hdcp_cmd_input in_msg; + union ta_hdcp_cmd_output out_msg; +}; + +enum psp_status { + PSP_STATUS__SUCCESS = 0, + PSP_STATUS__ERROR_INVALID_PARAMS, + PSP_STATUS__ERROR_GENERIC, + PSP_STATUS__ERROR_OUT_OF_MEMORY, + PSP_STATUS__ERROR_UNSUPPORTED_FEATURE +}; + +#endif /* MODULES_HDCP_HDCP_PSP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h new file mode 100644 index 000000000000..f2a0e1a064da --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -0,0 +1,298 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_H_ +#define MOD_HDCP_H_ + +#include "os_types.h" +#include "signal_types.h" + +/* Forward Declarations */ +struct mod_hdcp; + +#define MAX_NUM_OF_DISPLAYS 6 +#define MAX_NUM_OF_ATTEMPTS 4 +#define MAX_NUM_OF_ERROR_TRACE 10 + +/* detailed return status */ +enum mod_hdcp_status { + MOD_HDCP_STATUS_SUCCESS = 0, + MOD_HDCP_STATUS_FAILURE, + MOD_HDCP_STATUS_RESET_NEEDED, + MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND, + MOD_HDCP_STATUS_DISPLAY_NOT_FOUND, + MOD_HDCP_STATUS_INVALID_STATE, + MOD_HDCP_STATUS_NOT_IMPLEMENTED, + MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE, + MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE, + MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER, + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION, + MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE, + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED, + MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE, + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV, + MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */ + MOD_HDCP_STATUS_INVALID_OPERATION, + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING, + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, + MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED, + MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED, + MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION, + MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST, + MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE, +}; + +struct mod_hdcp_displayport { + uint8_t rev; + uint8_t assr_supported; +}; + +struct mod_hdcp_hdmi { + uint8_t reserved; +}; +enum mod_hdcp_operation_mode { + MOD_HDCP_MODE_OFF, + MOD_HDCP_MODE_DEFAULT, + MOD_HDCP_MODE_DP, + MOD_HDCP_MODE_DP_MST +}; + +enum mod_hdcp_display_state { + MOD_HDCP_DISPLAY_INACTIVE = 0, + MOD_HDCP_DISPLAY_ACTIVE, + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, + MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED +}; + +struct mod_hdcp_ddc { + void *handle; + struct { + bool (*read_i2c)(void *handle, + uint32_t address, + uint8_t offset, + uint8_t *data, + uint32_t size); + bool (*write_i2c)(void 
*handle,
+			uint32_t address,
+			const uint8_t *data,
+			uint32_t size);
+		bool (*read_dpcd)(void *handle,
+			uint32_t address,
+			uint8_t *data,
+			uint32_t size);
+		bool (*write_dpcd)(void *handle,
+			uint32_t address,
+			const uint8_t *data,
+			uint32_t size);
+	} funcs;
+};
+
+struct mod_hdcp_psp {
+	void *handle;
+	void *funcs;
+};
+
+struct mod_hdcp_display_adjustment {
+	uint8_t disable : 1;
+	uint8_t reserved : 7;
+};
+
+struct mod_hdcp_link_adjustment_hdcp1 {
+	uint8_t disable : 1;
+	uint8_t postpone_encryption : 1;
+	uint8_t reserved : 6;
+};
+
+enum mod_hdcp_force_hdcp_type {
+	MOD_HDCP_FORCE_TYPE_MAX = 0,
+	MOD_HDCP_FORCE_TYPE_0,
+	MOD_HDCP_FORCE_TYPE_1
+};
+
+struct mod_hdcp_link_adjustment_hdcp2 {
+	uint8_t disable : 1;
+	uint8_t force_type : 2;
+	uint8_t force_no_stored_km : 1;
+	uint8_t increase_h_prime_timeout : 1;
+	uint8_t reserved : 3;
+};
+
+struct mod_hdcp_link_adjustment {
+	uint8_t auth_delay;
+	struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
+	struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_error {
+	enum mod_hdcp_status status;
+	uint8_t state_id;
+};
+
+struct mod_hdcp_trace {
+	struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
+	uint8_t error_count;
+};
+
+enum mod_hdcp_encryption_status {
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON,
+	MOD_HDCP_ENCRYPTION_STATUS_HDCP2_ON
+};
+
+/* per link events dm has to notify to hdcp module */
+enum mod_hdcp_event {
+	MOD_HDCP_EVENT_CALLBACK = 0,
+	MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+	MOD_HDCP_EVENT_CPIRQ
+};
+
+/* output flags from module requesting timer operations */
+struct mod_hdcp_output {
+	uint8_t callback_needed;
+	uint8_t callback_stop;
+	uint8_t watchdog_timer_needed;
+	uint8_t watchdog_timer_stop;
+	uint16_t callback_delay;
+	uint16_t watchdog_timer_delay;
+};
+
+/* used to represent per display info */
+struct mod_hdcp_display {
+	enum mod_hdcp_display_state state;
+	uint8_t index;
+	uint8_t controller;
+	uint8_t dig_fe;
+	union {
+		uint8_t vc_id;
+	};
+	struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per link info */
+/* in case a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+	enum mod_hdcp_operation_mode mode;
+	uint8_t dig_be;
+	uint8_t ddc_line;
+	union {
+		struct mod_hdcp_displayport dp;
+		struct mod_hdcp_hdmi hdmi;
+	};
+	struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+	const struct mod_hdcp_display *display;
+	const struct mod_hdcp_link *link;
+	const struct mod_hdcp_trace *trace;
+	enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* contains values set on external display configuration change */
+struct mod_hdcp_config {
+	struct mod_hdcp_psp psp;
+	struct mod_hdcp_ddc ddc;
+	uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates memory of mod_hdcp per dc_link on dm init based on memory size */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+		struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+		struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+		struct mod_hdcp_output *output);
+
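[Editorial note: every entry point that takes a struct mod_hdcp_output expects the DM to act on the returned flags rather than have the module block internally. An illustrative sketch of that contract around mod_hdcp_add_display(); the wrapper and the timer plumbing are hypothetical, not part of this patch.]

/* Illustrative sketch only; not part of this patch. */
static void dm_add_display_sketch(struct mod_hdcp *hdcp,
				  struct mod_hdcp_link *link,
				  struct mod_hdcp_display *display)
{
	struct mod_hdcp_output output = { 0 };

	if (mod_hdcp_add_display(hdcp, link, display, &output) !=
			MOD_HDCP_STATUS_SUCCESS)
		return;

	if (output.callback_needed) {
		/*
		 * Schedule a timer that calls mod_hdcp_process_event(hdcp,
		 * MOD_HDCP_EVENT_CALLBACK, &output) after
		 * output.callback_delay ms (DM specific, elided).
		 */
	}
	if (output.watchdog_timer_needed) {
		/*
		 * Arm a watchdog that raises MOD_HDCP_EVENT_WATCHDOG_TIMEOUT
		 * after output.watchdog_timer_delay ms (DM specific, elided).
		 */
	}
}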
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+		uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+		uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+		struct mod_hdcp_output *output);
+
+/* called per link on events (i.e. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+		enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to c string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert state id to c string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+		enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index d930bdecb117..42cbeffac640 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -26,6 +26,7 @@ #ifndef MOD_INFO_PACKET_H_ #define MOD_INFO_PACKET_H_ +#include "dm_services.h" #include "mod_shared.h" //Forward Declarations struct dc_stream_state; @@ -33,6 +34,10 @@ struct dc_info_packet; struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet); + struct dc_info_packet *info_packet, + bool *use_vsc_sdp_for_colorimetry); + +void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); #endif diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h index b45f7d65e76a..fe2117904329 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -45,7 +45,6 @@ enum vrr_packet_type { PACKET_TYPE_VTEM }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) union lut3d_control_flags { unsigned int raw; struct { @@ -104,6 +103,5 @@ struct lut3d_settings { enum lut3d_control_gamut_map map2; enum lut3d_control_rotation_mode rotation2; }; -#endif #endif /* MOD_SHARED_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index d885d642ed7f..6a8a056424b8 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -30,7 +30,22 @@ #include "mod_freesync.h" #include "dc.h" +enum vsc_packet_revision { + vsc_packet_undefined = 0, + //01h = VSC SDP supports only 3D stereo. + vsc_packet_rev1 = 1, + //02h = 3D stereo + PSR. + vsc_packet_rev2 = 2, + //03h = 3D stereo + PSR2. + vsc_packet_rev3 = 3, + //04h = 3D stereo + PSR/PSR2 + Y-coordinate.
+ vsc_packet_rev4 = 4, + //05h = 3D stereo + PSR/PSR2 + Y-coordinate + Pixel Encoding/Colorimetry Format + vsc_packet_rev5 = 5, +}; + #define HDMI_INFOFRAME_TYPE_VENDOR 0x81 +#define HF_VSIF_VERSION 1 // VTEM Byte Offset #define VTEM_PB0 0 @@ -115,35 +130,41 @@ enum ColorimetryYCCDP { }; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet) + struct dc_info_packet *info_packet, + bool *use_vsc_sdp_for_colorimetry) { - unsigned int vscPacketRevision = 0; + unsigned int vsc_packet_revision = vsc_packet_undefined; unsigned int i; unsigned int pixelEncoding = 0; unsigned int colorimetryFormat = 0; bool stereo3dSupport = false; + /* Initialize first, later if infopacket is valid determine if VSC SDP + * should be used to signal colorimetry format and pixel encoding. + */ + *use_vsc_sdp_for_colorimetry = false; + if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) { - vscPacketRevision = 1; + vsc_packet_revision = vsc_packet_rev1; stereo3dSupport = true; } /*VSC packet set to 2 when DP revision >= 1.2*/ if (stream->psr_version != 0) - vscPacketRevision = 2; + vsc_packet_revision = vsc_packet_rev2; /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - vscPacketRevision = 5; + vsc_packet_revision = vsc_packet_rev5; /* VSC packet not needed based on the features * supported by this DP display */ - if (vscPacketRevision == 0) + if (vsc_packet_revision == vsc_packet_undefined) return; - if (vscPacketRevision == 0x2) { + if (vsc_packet_revision == vsc_packet_rev2) { /* Secondary-data Packet ID = 0*/ info_packet->hb0 = 0x00; /* 07h - Packet Type Value indicating Video @@ -165,7 +186,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; } - if (vscPacketRevision == 0x1) { + if (vsc_packet_revision == vsc_packet_rev1) { info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0 info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet @@ -236,7 +257,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, * the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").) */ - if (vscPacketRevision == 0x5) { + if (vsc_packet_revision == vsc_packet_rev5) { /* Secondary-data Packet ID = 0 */ info_packet->hb0 = 0x00; /* 07h - Packet Type Value indicating Video Stream Configuration packet */ @@ -248,6 +269,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; + /* If we are using VSC SDP revision 05h, use this to signal for + * colorimetry format and pixel encoding. HW should later be + * programmed to set MSA MISC1 bit 6 to indicate ignore + * colorimetry format and pixel encoding in the MSA. + */ + *use_vsc_sdp_for_colorimetry = true; + /* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs * Data Bytes DB 18~16 * Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding) @@ -392,6 +420,102 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, */ info_packet->sb[18] = 0; } +} + +/** + ***************************************************************************** + * Function: mod_build_hf_vsif_infopacket + * + * @brief + * Prepare HDMI Vendor Specific info frame. 
+ * Follows HDMI Spec to build up Vendor Specific info frame + * + * @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.) + * @param [out] info_packet: output structure where to store VSIF + ***************************************************************************** + */ +void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue) +{ + unsigned int length = 5; + bool hdmi_vic_mode = false; + uint8_t checksum = 0; + uint32_t i = 0; + enum dc_timing_3d_format format; + bool bALLM = (bool)ALLMEnabled; + bool bALLMVal = (bool)ALLMValue; + + info_packet->valid = false; + format = stream->timing.timing_3d_format; + if (stream->view_format == VIEW_3D_FORMAT_NONE) + format = TIMING_3D_FORMAT_NONE; + + if (stream->timing.hdmi_vic != 0 + && stream->timing.h_total >= 3840 + && stream->timing.v_total >= 2160 + && format == TIMING_3D_FORMAT_NONE) + hdmi_vic_mode = true; + + if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM) + return; + + info_packet->sb[1] = 0x03; + info_packet->sb[2] = 0x0C; + info_packet->sb[3] = 0x00; + + if (bALLM) { + info_packet->sb[1] = 0xD8; + info_packet->sb[2] = 0x5D; + info_packet->sb[3] = 0xC4; + info_packet->sb[4] = HF_VSIF_VERSION; + } + + if (format != TIMING_3D_FORMAT_NONE) + info_packet->sb[4] = (2 << 5); + + else if (hdmi_vic_mode) + info_packet->sb[4] = (1 << 5); + + switch (format) { + case TIMING_3D_FORMAT_HW_FRAME_PACKING: + case TIMING_3D_FORMAT_SW_FRAME_PACKING: + info_packet->sb[5] = (0x0 << 4); + break; + case TIMING_3D_FORMAT_SIDE_BY_SIDE: + case TIMING_3D_FORMAT_SBS_SW_PACKED: + info_packet->sb[5] = (0x8 << 4); + length = 6; + break; + + case TIMING_3D_FORMAT_TOP_AND_BOTTOM: + case TIMING_3D_FORMAT_TB_SW_PACKED: + info_packet->sb[5] = (0x6 << 4); + break; + + default: + break; + } + + if (hdmi_vic_mode) + info_packet->sb[5] = stream->timing.hdmi_vic; + + info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; + info_packet->hb1 = 0x01; + info_packet->hb2 = (uint8_t) (length); + + if (bALLM) + info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1); + + checksum += info_packet->hb0; + checksum += info_packet->hb1; + checksum += info_packet->hb2; + + for (i = 1; i <= length; i++) + checksum += info_packet->sb[i]; + + info_packet->sb[0] = (uint8_t) (0x100 - checksum); + + info_packet->valid = true; } diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 05e2be856037..4e2f615c3566 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -80,18 +80,18 @@ struct abm_parameters { static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0}, - {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0}, - {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70}, + {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0}, + {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf}, + {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0}, + {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation 
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 05e2be856037..4e2f615c3566 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -80,18 +80,18 @@ struct abm_parameters { static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0}, - {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0}, - {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70}, + {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0}, + {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf}, + {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0}, + {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0x99, 0x65, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, + {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters * const abm_settings[] = { @@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = { /* NOTE: iRAM is 256B in size */ struct iram_table_v_2 { /* flags */ - uint16_t flags; /* 0x00 U16 */ + uint16_t min_abm_backlight; /* 0x00 U16 */ /* parameters for ABM2.0 algorithm */ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ @@ -140,10 +140,10 @@ struct iram_table_v_2 { /* For reading PSR State directly from IRAM */ uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ uint8_t dmcu_state; /* 0xf6 */ uint16_t blRampReduction; /* 0xf7 */ @@ -164,42 +164,43 @@ struct iram_table_v_2_2 { uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ - uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ - uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ - uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ - uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ - uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ - uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ - uint8_t pad[21]; /* 0x6b U0.8 */ + uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ + uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ + uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ + uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ + uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ + uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ + uint16_t min_abm_backlight; /* 0x6b U16 */ + uint8_t pad[19]; /* 0x6d U0.8 */ /* parameters for crgb conversion */ - uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ - uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ - uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ + uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ /* parameters for custom curve */ /* thresholds for brightness --> backlight */ - uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ /* offsets for brightness --> backlight */ - uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ /* For reading PSR State directly from IRAM */ - uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ - uint8_t dmcu_state; /* 0xf6 */ -
- uint8_t dummy1; /* 0xf7 */ - uint8_t dummy2; /* 0xf8 */ - uint8_t dummy3; /* 0xf9 */ - uint8_t dummy4; /* 0xfa */ - uint8_t dummy5; /* 0xfb */ - uint8_t dummy6; /* 0xfc */ - uint8_t dummy7; /* 0xfd */ - uint8_t dummy8; /* 0xfe */ - uint8_t dummy9; /* 0xff */ + uint8_t psr_state; /* 0xf0 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_state; /* 0xf6 */ + + uint8_t dummy1; /* 0xf7 */ + uint8_t dummy2; /* 0xf8 */ + uint8_t dummy3; /* 0xf9 */ + uint8_t dummy4; /* 0xfa */ + uint8_t dummy5; /* 0xfb */ + uint8_t dummy6; /* 0xfc */ + uint8_t dummy7; /* 0xfd */ + uint8_t dummy8; /* 0xfe */ + uint8_t dummy9; /* 0xff */ }; #pragma pack(pop) @@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters { unsigned int set = params.set; - ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); ram_table->deviation_gain = 0xb3; ram_table->blRampReduction = @@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + ram_table->deviation_gain[0] = 0xb3; ram_table->deviation_gain[1] = 0xa8; ram_table->deviation_gain[2] = 0x98; @@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame unsigned int set = params.set; ram_table->flags = 0x0; + + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + for (i = 0; i < NUM_AGGR_LEVEL; i++) { ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain; ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index da5df00fedce..e54157026330 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -38,6 +38,7 @@ struct dmcu_iram_parameters { unsigned int backlight_lut_array_size; unsigned int backlight_ramping_reduction; unsigned int backlight_ramping_start; + unsigned int min_abm_backlight; unsigned int set; }; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 8889aaceec60..d655a76bedc6 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -53,7 +53,8 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_VCE, AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_VCN, - AMD_IP_BLOCK_TYPE_MES + AMD_IP_BLOCK_TYPE_MES, + AMD_IP_BLOCK_TYPE_JPEG }; enum amd_clockgating_state { @@ -99,6 +100,7 @@ enum amd_powergating_state { #define AMD_CG_SUPPORT_IH_CG (1 << 27) #define AMD_CG_SUPPORT_ATHUB_LS (1 << 28) #define AMD_CG_SUPPORT_ATHUB_MGCG (1 << 29) +#define AMD_CG_SUPPORT_JPEG_MGCG (1 << 30) /* PG flags */ #define AMD_PG_SUPPORT_GFX_PG (1 << 0) #define AMD_PG_SUPPORT_GFX_SMG (1 << 1) @@ -117,6 +119,7 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_VCN (1 << 14) #define AMD_PG_SUPPORT_VCN_DPG (1 << 15) #define AMD_PG_SUPPORT_ATHUB (1 << 16) +#define AMD_PG_SUPPORT_JPEG (1 << 17) enum PP_FEATURE_MASK { PP_SCLK_DPM_MASK = 0x1, @@ -143,6 +146,8 @@ enum PP_FEATURE_MASK { enum DC_FEATURE_MASK { DC_FBC_MASK = 0x1, DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2, + DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4, + DC_PSR_MASK = 0x8, }; enum 
amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h index a761ba07f937..fce965984e76 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_FUSESTRAP0 0x14D7 #define mmBUS_CNTL 0x1508 #define mmCONFIG_CNTL 0x1509 #define mmCONFIG_MEMSIZE 0x150a diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h index 8fbfd0261d27..39cc4880beb4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h @@ -32,6 +32,8 @@ #define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 #define MM_DATA__MM_DATA_MASK 0xffffffff #define MM_DATA__MM_DATA__SHIFT 0x0 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1 #define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1 #define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0 #define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 809759f7bb81..8d05d6ca1c8d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_FUSESTRAP0 0x14D7 #define mmCC_BIF_BX_STRAP2 0x152A #define mmBIF_MM_INDACCESS_CNTL 0x1500 #define mmBIF_DOORBELL_APER_EN 0x1501 diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h index adc71b01f793..73435687d049 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h @@ -32,6 +32,8 @@ #define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 #define MM_DATA__MM_DATA_MASK 0xffffffff #define MM_DATA__MM_DATA__SHIFT 0x0 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1 #define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2 #define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1 #define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h index be4249adb356..eddf83ec1c39 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h @@ -9859,6 +9859,8 @@ #define mmDP0_DP_STEER_FIFO_BASE_IDX 2 #define mmDP0_DP_MSA_MISC 0x210e #define mmDP0_DP_MSA_MISC_BASE_IDX 2 +#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f +#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP0_DP_VID_TIMING 0x2110 #define mmDP0_DP_VID_TIMING_BASE_IDX 2 #define mmDP0_DP_VID_N 0x2111 @@ -10187,6 +10189,8 @@ #define mmDP1_DP_STEER_FIFO_BASE_IDX 2 #define mmDP1_DP_MSA_MISC 0x220e #define mmDP1_DP_MSA_MISC_BASE_IDX 2 +#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f +#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_VID_TIMING 0x2210 #define mmDP1_DP_VID_TIMING_BASE_IDX 2 #define mmDP1_DP_VID_N 0x2211 @@ -10515,6 +10519,8 @@ #define mmDP2_DP_STEER_FIFO_BASE_IDX 2 #define mmDP2_DP_MSA_MISC 0x230e #define 
mmDP2_DP_MSA_MISC_BASE_IDX 2 +#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f +#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_VID_TIMING 0x2310 #define mmDP2_DP_VID_TIMING_BASE_IDX 2 #define mmDP2_DP_VID_N 0x2311 @@ -10843,6 +10849,8 @@ #define mmDP3_DP_STEER_FIFO_BASE_IDX 2 #define mmDP3_DP_MSA_MISC 0x240e #define mmDP3_DP_MSA_MISC_BASE_IDX 2 +#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f +#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_VID_TIMING 0x2410 #define mmDP3_DP_VID_TIMING_BASE_IDX 2 #define mmDP3_DP_VID_N 0x2411 @@ -11171,6 +11179,8 @@ #define mmDP4_DP_STEER_FIFO_BASE_IDX 2 #define mmDP4_DP_MSA_MISC 0x250e #define mmDP4_DP_MSA_MISC_BASE_IDX 2 +#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f +#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_VID_TIMING 0x2510 #define mmDP4_DP_VID_TIMING_BASE_IDX 2 #define mmDP4_DP_VID_N 0x2511 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h index ca16d9125fbc..2bfaaa8157d0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h @@ -1146,7 +1146,14 @@ #define mmATC_L2_MEM_POWER_LS_BASE_IDX 0 #define mmATC_L2_CGTT_CLK_CTRL 0x080c #define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0 - +#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e +#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f +#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810 +#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811 +#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0 // addressBlock: gc_utcl2_vml2pfdec // base address: 0xa100 @@ -1206,7 +1213,14 @@ #define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0 #define mmVM_L2_CGTT_CLK_CTRL 0x085e #define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0 - +#define mmVM_L2_MEM_ECC_INDEX 0x0860 +#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861 +#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVM_L2_MEM_ECC_CNT 0x0862 +#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0 +#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863 +#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0 // addressBlock: gc_utcl2_vml2vcdec // base address: 0xa200 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index 064c4bb1dc62..d4c613a85352 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -6661,7 +6661,6 @@ #define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L #define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L - // addressBlock: gc_utcl2_vml2pfdec //VM_L2_CNTL #define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0 @@ -6991,7 +6990,22 @@ #define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L #define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L #define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L - +//VM_L2_MEM_ECC_INDEX +#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VM_L2_WALKER_MEM_ECC_INDEX +#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VM_L2_MEM_ECC_CNT +#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc +#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe +#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L +#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L 
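The VM_L2 ECC counter registers added above follow the usual SHIFT/MASK convention of these headers: the single-bit (SEC) and double-bit (DED) error counts are packed into one 32-bit register and recovered by mask-then-shift. A minimal decode sketch in C, assuming reg_val holds a raw read of mmVM_L2_MEM_ECC_CNT (the register access itself is omitted; in-tree amdgpu code would typically use its REG_GET_FIELD() helper for the same operation). The defines duplicate the ones introduced above so the sketch is self-contained:

#include <stdint.h>

#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK   0x00003000L
#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK   0x0000C000L

/* Decode correctable (SEC) and uncorrectable (DED) counts from a raw value. */
static void vm_l2_ecc_counts(uint32_t reg_val, uint32_t *sec, uint32_t *ded)
{
	*sec = (reg_val & VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK) >>
	       VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT;
	*ded = (reg_val & VM_L2_MEM_ECC_CNT__DED_COUNT_MASK) >>
	       VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT;
}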
+//VM_L2_WALKER_MEM_ECC_CNT +#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc +#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe +#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L +#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L // addressBlock: gc_utcl2_vml2vcdec //VM_CONTEXT0_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h index 352ffae7a7ca..2c3ce243861a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h @@ -1964,4 +1964,20 @@ #define mmATC_L2_PERFCOUNTER_RSLT_CNTL 0x084a #define mmATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0 +/* MMEA */ +#define mmMMEA0_EDC_CNT_VG20 0x0206 +#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0 +#define mmMMEA0_EDC_CNT2_VG20 0x0207 +#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0 +#define mmMMEA1_EDC_CNT_VG20 0x0346 +#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0 +#define mmMMEA1_EDC_CNT2_VG20 0x0347 +#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0 + +// addressBlock: mmhub_utcl2_vmsharedpfdec +// base address: 0x6a040 +#define mmMC_VM_XGMI_LFB_CNTL 0x0823 +#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0 +#define mmMC_VM_XGMI_LFB_SIZE 0x0824 +#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h index 34278ef2aa1b..198f5f93ed1a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h @@ -10124,4 +10124,126 @@ #define ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA0_EDC_CNT +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA0_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA0_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define 
MMEA0_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA0_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA0_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA0_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA0_EDC_CNT2 +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA0_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA0_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +//MMEA1_EDC_CNT +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT_VG20__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT_VG20__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT_VG20__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA1_EDC_CNT_VG20__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT_VG20__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L 
+#define MMEA1_EDC_CNT_VG20__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT_VG20__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA1_EDC_CNT_VG20__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA1_EDC_CNT_VG20__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA1_EDC_CNT_VG20__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA1_EDC_CNT2 +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT2_VG20__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT2_VG20__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT2_VG20__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT2_VG20__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA1_EDC_CNT2_VG20__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT2_VG20__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT2_VG20__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT2_VG20__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT2_VG20__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L + +// addressBlock: mmhub_utcl2_vmsharedpfdec +//MC_VM_XGMI_LFB_CNTL +#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 +#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L +//MC_VM_XGMI_LFB_SIZE +#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 +#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h deleted file mode 100644 index f2ae3a58949e..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (C) 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN - * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef _mmhub_9_4_0_OFFSET_HEADER -#define _mmhub_9_4_0_OFFSET_HEADER - -/* MMEA */ -#define mmMMEA0_SDP_ARB_FINAL_VG20 0x01ee -#define mmMMEA0_SDP_ARB_FINAL_VG20_BASE_IDX 0 -#define mmMMEA0_EDC_CNT_VG20 0x0206 -#define mmMMEA0_EDC_CNT_VG20_BASE_IDX 0 -#define mmMMEA0_EDC_CNT2_VG20 0x0207 -#define mmMMEA0_EDC_CNT2_VG20_BASE_IDX 0 -#define mmMMEA0_EDC_MODE_VG20 0x0210 -#define mmMMEA0_EDC_MODE_VG20_BASE_IDX 0 -#define mmMMEA0_ERR_STATUS_VG20 0x0211 -#define mmMMEA0_ERR_STATUS_VG20_BASE_IDX 0 -#define mmMMEA1_SDP_ARB_FINAL_VG20 0x032e -#define mmMMEA1_SDP_ARB_FINAL_VG20_BASE_IDX 0 -#define mmMMEA1_EDC_CNT_VG20 0x0346 -#define mmMMEA1_EDC_CNT_VG20_BASE_IDX 0 -#define mmMMEA1_EDC_CNT2_VG20 0x0347 -#define mmMMEA1_EDC_CNT2_VG20_BASE_IDX 0 -#define mmMMEA1_EDC_MODE_VG20 0x0350 -#define mmMMEA1_EDC_MODE_VG20_BASE_IDX 0 -#define mmMMEA1_ERR_STATUS_VG20 0x0351 -#define mmMMEA1_ERR_STATUS_VG20_BASE_IDX 0 - -// addressBlock: mmhub_utcl2_vmsharedpfdec -// base address: 0x6a040 -#define mmMC_VM_XGMI_LFB_CNTL 0x0823 -#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0 -#define mmMC_VM_XGMI_LFB_SIZE 0x0824 -#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0 - -#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h deleted file mode 100644 index c24259ed12a1..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (C) 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN - * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ -#ifndef _mmhub_9_4_0_SH_MASK_HEADER -#define _mmhub_9_4_0_SH_MASK_HEADER - -//MMEA0_SDP_ARB_FINAL -#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 -#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 -#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa -#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 -#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 -#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a -#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL -#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L -#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L -#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L -#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L -#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L -#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L -//MMEA0_EDC_CNT -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc -#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe -#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 -#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 -#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 -#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 -#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 -#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a -#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L -#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L -#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L -#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L -#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L -#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L -#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L -#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L -#define 
MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L -//MMEA0_EDC_CNT2 -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc -#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe -#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 -#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 -#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 -#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L -#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L -#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L -#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L -#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L -#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L -//MMEA0_EDC_MODE -#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 -#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11 -#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14 -#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d -#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f -#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L -#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L -#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L -#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L -#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L -//MMEA0_ERR_STATUS -#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 -#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa -#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb -#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc -#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd -#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL -#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L -#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L -#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L -#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L -#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L -//MMEA1_SDP_ARB_FINAL -#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 -#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 -#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa -#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 -#define 
MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 -#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 -#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a -#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL -#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L -#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L -#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L -#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L -#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L -#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L -//MMEA1_EDC_CNT -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc -#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe -#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 -#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 -#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 -#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 -#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 -#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a -#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L -#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L -#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L -#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L -#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L -#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L -#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L -#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L -#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L -//MMEA1_EDC_CNT2 -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa -#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc -#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe -#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 -#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 -#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 -#define 
MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L -#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L -#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L -#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L -#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L -#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L -#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L -#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L -#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L -#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L -//MMEA1_EDC_MODE -#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 -#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11 -#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14 -#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d -#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f -#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L -#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L -#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L -#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L -#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L -//MMEA1_ERR_STATUS -#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 -#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa -#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb -#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc -#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd -#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL -#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L -#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L -#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L -#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L -#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L - -// addressBlock: mmhub_utcl2_vmsharedpfdec -//MC_VM_XGMI_LFB_CNTL -#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 -#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 -#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L -#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L -//MC_VM_XGMI_LFB_SIZE -#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 -#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL - -#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h index 4bcacf529852..991128bb9476 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h @@ -22,6 +22,9 @@ #ifndef _nbio_7_4_0_SMN_HEADER #define _nbio_7_4_0_SMN_HEADER +// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk +// base address: 0x10100000 +#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c #define smnCPM_CONTROL 0x11180460 @@ -53,4 +56,13 @@ #define smnPCIE_RX_NUM_NAK 0x11180038 #define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c +// addressBlock: nbio_iohub_nb_misc_misc_cfgdec +// base address: 0x13a10000 +#define smnIOHC_INTERRUPT_EOI 0x13a10120 + +// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec +// base address: 0x13a20000 +#define smnRAS_GLOBAL_STATUS_LO 0x13a20020 
+#define smnRAS_GLOBAL_STATUS_HI 0x13a20024 + #endif // _nbio_7_4_0_SMN_HEADER diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h index 994e796a28d7..ce5830ebe095 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -2793,8 +2793,8 @@ #define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2 #define mmBIF_FB_EN 0x00ff #define mmBIF_FB_EN_BASE_IDX 2 -#define mmBIF_BUSY_DELAY_CNTR 0x0100 -#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2 +#define mmBIF_INTR_CNTL 0x0100 +#define mmBIF_INTR_CNTL_BASE_IDX 2 #define mmBIF_MST_TRANS_PENDING_VF 0x0109 #define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2 #define mmBIF_SLV_TRANS_PENDING_VF 0x010a diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h index d467b939c971..07f04b2b5bdd 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h @@ -20420,9 +20420,9 @@ #define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1 #define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L #define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L -//BIF_BUSY_DELAY_CNTR -#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0 -#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL +//BIF_INTR_CNTL +#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0 +#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L //BIF_MST_TRANS_PENDING_VF #define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0 #define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL @@ -48436,4 +48436,47 @@ #define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L #define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L +//IOHC_INTERRUPT_EOI +#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0 +#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1 +#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2 +#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L +#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L +#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L + +//RAS_GLOBAL_STATUS_LO +#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0 +#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1 +#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2 +#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8 +#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9 +#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa +#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb +#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd +#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf +#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L +#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L +#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L +#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L +#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L +#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L +#define 
RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L +#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L +#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L +//RAS_GLOBAL_STATUS_HI +#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0 +#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1 +#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L +#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h index dc9895a684fe..096d878eb1de 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h @@ -588,11 +588,15 @@ #define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L #define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L //IH_CLK_CTRL +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19 +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a #define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b #define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c #define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d #define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e #define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L #define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L #define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L #define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h index dbc2e723f659..71169daa701a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h @@ -49,6 +49,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 6af9f0217b34..61a9a84e0c3a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h @@ -194,6 +194,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index bd3685166779..351446754c72 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h @@ -49,6 +49,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define 
ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h index 627906674fe8..4bfd5f8ba66c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h @@ -194,6 +194,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index f35aba72e640..21da61c398f5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -52,6 +52,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h index 481ee6560aa9..f64fe0fbcb32 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -220,6 +220,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h index d3876052562b..687d6843c258 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h @@ -121,6 +121,98 @@ #define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0 #define mmCKSVII2C_IC_COMP_TYPE 0x006d #define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0 +#define mmCKSVII2C1_IC_CON 0x0080 +#define mmCKSVII2C1_IC_CON_BASE_IDX 0 +#define mmCKSVII2C1_IC_TAR 0x0081 +#define mmCKSVII2C1_IC_TAR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SAR 0x0082 +#define mmCKSVII2C1_IC_SAR_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_MADDR 0x0083 +#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DATA_CMD 0x0084 +#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0 +#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085 +#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086 +#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087 +#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088 +#define 
mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089 +#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a +#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_INTR_STAT 0x008b +#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0 +#define mmCKSVII2C1_IC_INTR_MASK 0x008c +#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0 +#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d +#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0 +#define mmCKSVII2C1_IC_RX_TL 0x008e +#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0 +#define mmCKSVII2C1_IC_TX_TL 0x008f +#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_INTR 0x0090 +#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091 +#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092 +#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093 +#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094 +#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095 +#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096 +#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097 +#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098 +#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_START_DET 0x0099 +#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a +#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0 +#define mmCKSVII2C1_IC_ENABLE 0x009b +#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0 +#define mmCKSVII2C1_IC_STATUS 0x009c +#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0 +#define mmCKSVII2C1_IC_TXFLR 0x009d +#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_RXFLR 0x009e +#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SDA_HOLD 0x009f +#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0 +#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0 +#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1 +#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_CR 0x00a2 +#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3 +#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4 +#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5 +#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0 +#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6 +#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7 +#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8 +#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9 +#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa +#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab +#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac +#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_TYPE 0x00ad +#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0 #define mmSMUIO_MP_RESET_INTR 0x00c1 #define mmSMUIO_MP_RESET_INTR_BASE_IDX 0 #define mmSMUIO_SOC_HALT 0x00c2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h 
b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h index f8afa3518bf2..6905a9618127 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h @@ -268,6 +268,182 @@ //CKSVII2C_IC_COMP_TYPE #define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0 #define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL +//CKSVII2C1_IC_CON +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5 +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C1_IC_TAR +#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0 +#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa +#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL +#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L +#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C1_IC_SAR +#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0 +#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL +//CKSVII2C1_IC_HS_MADDR +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0 +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L +//CKSVII2C1_IC_DATA_CMD +#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0 +#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8 +#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9 +#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa +#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL +#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L +#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L +#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L +//CKSVII2C1_IC_SS_SCL_HCNT +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_SS_SCL_LCNT +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_HCNT +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_LCNT +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_HCNT +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_LCNT +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0 +#define 
CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_INTR_STAT +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_INTR_MASK +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_RAW_INTR_STAT +//CKSVII2C1_IC_RX_TL +//CKSVII2C1_IC_TX_TL +//CKSVII2C1_IC_CLR_INTR +//CKSVII2C1_IC_CLR_RX_UNDER 
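+// Usage sketch (illustrative only, not part of the original header; it assumes
+// the driver's standard RREG32_SOC15 accessor): each field above pairs a
+// __SHIFT with a _MASK and is decoded with the usual mask-then-shift idiom,
+//   u32 stat = RREG32_SOC15(SMUIO, 0, mmCKSVII2C1_IC_INTR_STAT);
+//   u32 tx_abrt = (stat & CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK) >>
+//                 CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT;
+// Registers listed below by name only carry no field macros in this header.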
+//CKSVII2C1_IC_CLR_RX_OVER +//CKSVII2C1_IC_CLR_TX_OVER +//CKSVII2C1_IC_CLR_RD_REQ +//CKSVII2C1_IC_CLR_TX_ABRT +//CKSVII2C1_IC_CLR_RX_DONE +//CKSVII2C1_IC_CLR_ACTIVITY +//CKSVII2C1_IC_CLR_STOP_DET +//CKSVII2C1_IC_CLR_START_DET +//CKSVII2C1_IC_CLR_GEN_CALL +//CKSVII2C1_IC_ENABLE +#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L +//CKSVII2C1_IC_STATUS +#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0 +#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1 +#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2 +#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3 +#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4 +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5 +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6 +#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L +#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L +#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L +#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L +#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L +//CKSVII2C1_IC_TXFLR +//CKSVII2C1_IC_RXFLR +//CKSVII2C1_IC_SDA_HOLD +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL +//CKSVII2C1_IC_TX_ABRT_SOURCE +//CKSVII2C1_IC_SLV_DATA_NACK_ONLY +//CKSVII2C1_IC_DMA_CR +//CKSVII2C1_IC_DMA_TDLR +//CKSVII2C1_IC_DMA_RDLR +//CKSVII2C1_IC_SDA_SETUP +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL +//CKSVII2C1_IC_ACK_GENERAL_CALL +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C1_IC_ENABLE_STATUS +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L //SMUIO_MP_RESET_INTR #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h index cf2149cc12ee..90350f46a0c4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h @@ -24,6 +24,18 @@ // addressBlock: uvd0_mmsch_dec // base address: 0x1e000 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 // addressBlock: uvd0_jpegnpdec diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index e88541d67aa0..dd7cbc00a0aa 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ 
b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1 /* Total 32bit cap indication */ enum atombios_firmware_capability { - ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001, - ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002, - ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040, - ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080, - ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, - ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, + ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001, + ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002, + ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040, + ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080, + ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, + ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, + ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400, }; enum atom_cooling_solution_id{ @@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1 uint16_t used_by_driver_in_kb; }; +/* This is part of vram_usagebyfirmware_v2_1 */ +struct vram_reserve_block +{ + uint32_t start_address_in_kb; + uint16_t used_by_firmware_in_kb; + uint16_t used_by_driver_in_kb; +}; + +/* Definitions for constants */ +enum atomfirmware_internal_constants +{ + ONE_KiB = 0x400, + ONE_MiB = 0x100000, +}; /* *************************************************************************** diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index 5dcb776548d8..7ec4331e67f2 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -25,7 +25,6 @@ #define _DISCOVERY_H_ #define PSP_HEADER_SIZE 256 -#define BINARY_MAX_SIZE (64 << 10) #define BINARY_SIGNATURE 0x28211407 #define DISCOVERY_TABLE_SIGNATURE 0x53445049 diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h new file mode 100644 index 000000000000..79af4258f259 --- /dev/null +++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __IRQSRCS_NBIF_7_4_H__ +#define __IRQSRCS_NBIF_7_4_H__ + +#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated +#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off +#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller +#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB +#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF) +#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF) +#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF) +#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF) +#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT +#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT +#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives unsupported atomic opcode from MC +#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW 0xCF // BIF receives atomic request from MC while AtomicOp Requester is not enabled in PCIE config space + +#endif // __IRQSRCS_NBIF_7_4_H__ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 98b9533e672b..2cd217e60125 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -291,15 +291,18 @@ struct kfd2kgd_calls { uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); - bool (*get_atc_vmid_pasid_mapping_valid)( + bool (*get_atc_vmid_pasid_mapping_info)( struct kgd_dev *kgd, - uint8_t vmid); - uint16_t (*get_atc_vmid_pasid_mapping_pasid)( - struct kgd_dev *kgd, - uint8_t vmid); + uint8_t vmid, + uint16_t *p_pasid); + /* No longer needed from GFXv9 onward. The scratch base address is + * passed to the shader by the CP. It's the user mode driver's + * responsibility.
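+ * (Illustrative sketch only: on pre-GFXv9 parts an implementation of this
+ * hook typically programmed the per-VMID scratch base itself, roughly
+ *     lock_srbm(kgd, 0, 0, 0, vmid);
+ *     WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ *     unlock_srbm(kgd);
+ * exact helper and register names vary by ASIC generation.)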
+ */ void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); + int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 27cf0afaa0b4..a7f92d0b3a90 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -179,6 +179,11 @@ enum pp_mp1_state { PP_MP1_STATE_RESET, }; +enum pp_df_cstate { + DF_CSTATE_DISALLOW = 0, + DF_CSTATE_ALLOW, +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -215,6 +220,9 @@ enum pp_mp1_state { ((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \ (support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT) +#define XGMI_MODE_PSTATE_D3 0 +#define XGMI_MODE_PSTATE_D0 1 + struct seq_file; enum amd_pp_clock_type; struct amd_pp_simple_clock_info; @@ -312,6 +320,8 @@ struct amd_pm_funcs { int (*get_ppfeature_status)(void *handle, char *buf); int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks); int (*asic_reset_mode_2)(void *handle); + int (*set_df_cstate)(void *handle, enum pp_df_cstate state); + int (*set_xgmi_pstate)(void *handle, uint32_t pstate); }; #endif diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h index 094648cac392..07633e22e99a 100644 --- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h +++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h @@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, @@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300 #define UVD0_BASE__INST6_SEG3 0 #define UVD0_BASE__INST6_SEG4 0 +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0 +#define DCN_BASE__INST0_SEG4 0 + +#define DCN_BASE__INST1_SEG0 0 +#define DCN_BASE__INST1_SEG1 0 +#define DCN_BASE__INST1_SEG2 0 +#define DCN_BASE__INST1_SEG3 0 +#define DCN_BASE__INST1_SEG4 0 + +#define DCN_BASE__INST2_SEG0 0 +#define DCN_BASE__INST2_SEG1 0 +#define DCN_BASE__INST2_SEG2 0 +#define DCN_BASE__INST2_SEG3 0 +#define DCN_BASE__INST2_SEG4 0 + +#define DCN_BASE__INST3_SEG0 0 +#define DCN_BASE__INST3_SEG1 0 +#define DCN_BASE__INST3_SEG2 0 +#define DCN_BASE__INST3_SEG3 0 +#define DCN_BASE__INST3_SEG4 0 + +#define DCN_BASE__INST4_SEG0 0 +#define DCN_BASE__INST4_SEG1 0 +#define DCN_BASE__INST4_SEG2 0 +#define DCN_BASE__INST4_SEG3 0 +#define DCN_BASE__INST4_SEG4 0 #endif diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h index c14ba65a2415..adf1b754666e 100644 --- a/drivers/gpu/drm/amd/include/vega10_enum.h +++ b/drivers/gpu/drm/amd/include/vega10_enum.h @@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001, typedef enum MTYPE { MTYPE_NC = 0x00000000, MTYPE_WC = 0x00000001, +MTYPE_RW = 0x00000001, MTYPE_CC = 0x00000002, MTYPE_UC = 0x00000003, } MTYPE; diff --git 
a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index fa8ad7db2b3a..5087d6bdba60 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -48,7 +48,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); - hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false; hwmgr->device = amdgpu_cgs_create_device(adev); mutex_init(&hwmgr->smu_lock); hwmgr->chip_family = adev->family; @@ -276,6 +275,9 @@ static int pp_dpm_load_fw(void *handle) { struct pp_hwmgr *hwmgr = handle; if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) return -EINVAL; + + if (!hwmgr->not_vf) + return 0; @@ -969,6 +971,14 @@ static int pp_dpm_switch_power_profile(void *handle, workload = hwmgr->workload_setting[index]; } + if (type == PP_SMC_POWER_PROFILE_COMPUTE && + hwmgr->hwmgr_func->disable_power_features_for_compute_performance) { + if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) { + mutex_unlock(&hwmgr->smu_lock); + return -EINVAL; + } + } + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); mutex_unlock(&hwmgr->smu_lock); @@ -1421,6 +1431,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) { struct pp_hwmgr *hwmgr = handle; + *cap = false; if (!hwmgr) return -EINVAL; @@ -1548,6 +1559,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire) return ret; } +static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_df_cstate(hwmgr, state); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_xgmi_pstate(void *handle, uint32_t pstate) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1606,4 +1651,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_ppfeature_status = pp_set_ppfeature_status, .asic_reset_mode_2 = pp_asic_reset_mode_2, .smu_i2c_bus_access = pp_smu_i2c_bus_access, + .set_df_cstate = pp_set_df_cstate, + .set_xgmi_pstate = pp_set_xgmi_pstate, }; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 4acf139ea014..6dddd7818558 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -25,11 +25,16 @@ #include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "soc15_common.h" #include "smu_v11_0.h" #include "smu_v12_0.h" #include "atom.h" #include "amd_pcie.h" +#include "vega20_ppt.h" +#include "arcturus_ppt.h" +#include "navi10_ppt.h" +#include "renoir_ppt.h" #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) #type @@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) uint32_t sort_feature[SMU_FEATURE_COUNT]; uint64_t hw_feature_count = 0; + mutex_lock(&smu->mutex); + ret = smu_feature_get_enabled_mask(smu, 
feature_mask, 2); if (ret) goto failed; @@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) } failed: + mutex_unlock(&smu->mutex); + return size; } +static int smu_feature_update_enable_state(struct smu_context *smu, + uint64_t feature_mask, + bool enabled) +{ + struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_low = 0, feature_high = 0; + int ret = 0; + + if (!smu->pm_enabled) + return ret; + + feature_low = (feature_mask >> 0 ) & 0xffffffff; + feature_high = (feature_mask >> 32) & 0xffffffff; + + if (enabled) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } + + mutex_lock(&feature->mutex); + if (enabled) + bitmap_or(feature->enabled, feature->enabled, + (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); + else + bitmap_andnot(feature->enabled, feature->enabled, + (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); + mutex_unlock(&feature->mutex); + + return ret; +} + int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) { int ret = 0; @@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) uint64_t feature_2_disabled = 0; uint64_t feature_enables = 0; + mutex_lock(&smu->mutex); + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) - return ret; + goto out; feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); @@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) if (feature_2_enabled) { ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); if (ret) - return ret; + goto out; } if (feature_2_disabled) { ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); if (ret) - return ret; + goto out; } +out: + mutex_unlock(&smu->mutex); + return ret; } @@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max) { - int ret = 0, clk_id = 0; - uint32_t param; + int ret = 0; if (min <= 0 && max <= 0) return -EINVAL; @@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (!smu_clk_dpm_is_enabled(smu, clk_type)) return 0; - clk_id = smu_clk_get_index(smu, clk_type); - if (clk_id < 0) - return clk_id; - - if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, - param); - if (ret) - return ret; - } - - if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, - param); - if (ret) - return ret; - } - - + ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); return ret; } @@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, } int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max) + uint32_t *min, uint32_t *max, bool lock_needed) { uint32_t clock_limit; int ret = 0; @@ -237,6 +276,9 @@ int 
smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (!min && !max) return -EINVAL; + if (lock_needed) + mutex_lock(&smu->mutex); + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { switch (clk_type) { case SMU_MCLK: @@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, *min = clock_limit / 100; if (max) *max = clock_limit / 100; - - return 0; + } else { + /* + * Todo: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed to the + * core driver, and then add helpers for the functionality that is common + * (SMU_v11_x | SMU_v12_x funcs). + */ + ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); } - /* - * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the - * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). - */ - ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); + + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) return true; } - +/** + * smu_dpm_set_power_gate - power gate/ungate the specific IP block + * + * @smu: smu_context pointer + * @block_type: the IP block to power gate/ungate + * @gate: to power gate if true, ungate otherwise + * + * This API uses no smu->mutex lock protection because: + * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce), + * which the callers guarantee to be race condition free. + * 2. Or it is called on a user setting request of power_dpm_force_performance_level. + * In this case, the smu->mutex lock protection is already enforced on + * the parent API smu_force_performance_level of the call path. + */ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate) { @@ -357,6 +415,9 @@ case AMD_IP_BLOCK_TYPE_SDMA: ret = smu_powergate_sdma(smu, gate); break; + case AMD_IP_BLOCK_TYPE_JPEG: + ret = smu_dpm_set_jpeg_enable(smu, gate); + break; default: break; } @@ -364,12 +425,6 @@ return ret; } -enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) -{ - /* not support power state */ - return POWER_STATE_TYPE_DEFAULT; -} - int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info) { @@ -439,7 +494,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int int ret = 0; int table_id = smu_table_get_index(smu, table_index); - if (!table_data || table_id >= smu_table->table_count || table_id < 0) + if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) return -EINVAL; table = &smu_table->tables[table_index]; @@ -463,7 +518,7 @@ return ret; /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); if (!drv2smu) memcpy(table_data, table->cpu_addr, table->size); @@ -475,15 +530,18 @@ bool is_support_sw_smu(struct amdgpu_device *adev) { if (adev->asic_type == CHIP_VEGA20) return (amdgpu_dpm == 2) ? 
true : false; - else if (adev->asic_type >= CHIP_ARCTURUS) - return true; - else + else if (adev->asic_type >= CHIP_ARCTURUS) { + if (amdgpu_sriov_vf(adev)) + return false; + else + return true; + } else return false; } bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) { - if (amdgpu_dpm != 1) + if (!is_support_sw_smu(adev)) return false; if (adev->asic_type == CHIP_VEGA20) @@ -495,16 +553,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) int smu_sys_get_pp_table(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; + uint32_t powerplay_table_size; if (!smu_table->power_play_table && !smu_table->hardcode_pptable) return -EINVAL; + mutex_lock(&smu->mutex); + if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; else *table = smu_table->power_play_table; - return smu_table->power_play_table_size; + powerplay_table_size = smu_table->power_play_table_size; + + mutex_unlock(&smu->mutex); + + return powerplay_table_size; } int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) @@ -531,13 +596,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) memcpy(smu_table->hardcode_pptable, buf, size); smu_table->power_play_table = smu_table->hardcode_pptable; smu_table->power_play_table_size = size; - mutex_unlock(&smu->mutex); + + /* + * A special hw_fini action (for Navi1x, the DPM disablement will be + * skipped) may be needed for custom pptable uploading. + */ + smu->uploading_custom_pp_table = true; ret = smu_reset(smu); if (ret) pr_info("smu reset failed, ret = %d\n", ret); - return ret; + smu->uploading_custom_pp_table = false; failed: mutex_unlock(&smu->mutex); @@ -569,41 +639,7 @@ int smu_feature_init_dpm(struct smu_context *smu) return ret; } -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled) -{ - uint32_t feature_low = 0, feature_high = 0; - int ret = 0; - - if (!smu->pm_enabled) - return ret; - feature_low = (feature_mask >> 0 ) & 0xffffffff; - feature_high = (feature_mask >> 32) & 0xffffffff; - - if (enabled) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - - } - - return ret; -} int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { @@ -633,8 +669,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, { struct smu_feature *feature = &smu->smu_feature; int feature_id; - uint64_t feature_mask = 0; - int ret = 0; feature_id = smu_feature_get_index(smu, mask); if (feature_id < 0) @@ -642,22 +676,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, WARN_ON(feature_id > feature->feature_num); - feature_mask = 1ULL << feature_id; - - mutex_lock(&feature->mutex); - ret = smu_feature_update_enable_state(smu, feature_mask, enable); - if (ret) - goto failed; - - if (enable) - test_and_set_bit(feature_id, feature->enabled); - else - test_and_clear_bit(feature_id, feature->enabled); - -failed: - mutex_unlock(&feature->mutex); - - return ret; + return smu_feature_update_enable_state(smu, + 1ULL << feature_id, + enable); } int 
smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) @@ -707,20 +728,27 @@ static int smu_set_funcs(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; + if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) + smu->od_enabled = true; + switch (adev->asic_type) { case CHIP_VEGA20: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + vega20_set_ppt_funcs(smu); + break; case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: + navi10_set_ppt_funcs(smu); + break; case CHIP_ARCTURUS: - if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) - smu->od_enabled = true; - smu_v11_0_set_smu_funcs(smu); + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + arcturus_set_ppt_funcs(smu); + /* OD is not supported on Arcturus */ + smu->od_enabled = false; break; case CHIP_RENOIR: - if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) - smu->od_enabled = true; - smu_v12_0_set_smu_funcs(smu); + renoir_set_ppt_funcs(smu); break; default: return -EINVAL; @@ -736,6 +764,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; + smu->is_apu = false; mutex_init(&smu->mutex); return smu_set_funcs(adev); @@ -749,11 +778,10 @@ static int smu_late_init(void *handle) if (!smu->pm_enabled) return 0; - mutex_lock(&smu->mutex); smu_handle_task(&adev->smu, smu->smu_dpm.dpm_level, - AMD_PP_TASK_COMPLETE_INIT); - mutex_unlock(&smu->mutex); + AMD_PP_TASK_COMPLETE_INIT, + false); return 0; } @@ -919,14 +947,9 @@ static int smu_init_fb_allocations(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - uint32_t table_count = smu_table->table_count; - uint32_t i = 0; - int32_t ret = 0; + int ret, i; - if (table_count <= 0) - return -EINVAL; - - for (i = 0 ; i < table_count; i++) { + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; ret = amdgpu_bo_create_kernel(adev, @@ -942,7 +965,7 @@ static int smu_init_fb_allocations(struct smu_context *smu) return 0; failed: - for (; i > 0; i--) { + while (--i >= 0) { if (tables[i].size == 0) continue; amdgpu_bo_free_kernel(&tables[i].bo, @@ -957,13 +980,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - uint32_t table_count = smu_table->table_count; uint32_t i = 0; - if (table_count == 0 || tables == NULL) + if (!tables) return 0; - for (i = 0 ; i < table_count; i++) { + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; amdgpu_bo_free_kernel(&tables[i].bo, @@ -974,50 +996,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } -static int smu_override_pcie_parameters(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; - int ret; - - if (adev->flags & AMD_IS_APU) - return 0; - - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) - pcie_gen = 3; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - pcie_gen = 2; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - pcie_gen = 1; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) - pcie_gen = 0; - - /* Bit 31:16: LCLK DPM level. 
0 is DPM0, and 1 is DPM1 - * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 - * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 - */ - if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) - pcie_width = 6; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) - pcie_width = 5; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) - pcie_width = 4; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) - pcie_width = 3; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) - pcie_width = 2; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) - pcie_width = 1; - - smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; - ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg); - if (ret) - pr_err("[%s] Attempt to override pcie params failed!\n", __func__); - return ret; -} - static int smu_smc_table_hw_init(struct smu_context *smu, bool initialize) { @@ -1092,8 +1070,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - /* issue RunAfllBtc msg */ - ret = smu_run_afll_btc(smu); + /* issue Run*Btc msg */ + ret = smu_run_btc(smu); if (ret) return ret; @@ -1106,10 +1084,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; if (adev->asic_type != CHIP_ARCTURUS) { - ret = smu_override_pcie_parameters(smu); - if (ret) - return ret; - ret = smu_notify_display_change(smu); if (ret) return ret; @@ -1138,6 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; } + if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_override_pcie_parameters(smu); + if (ret) + return ret; + } + ret = smu_set_default_od_settings(smu, initialize); if (ret) return ret; @@ -1147,7 +1127,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - ret = smu_get_power_limit(smu, &smu->default_power_limit, true); + ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false); if (ret) return ret; } @@ -1212,10 +1192,9 @@ static int smu_free_memory_pool(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *memory_pool = &smu_table->memory_pool; - int ret = 0; if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) - return ret; + return 0; amdgpu_bo_free_kernel(&memory_pool->bo, &memory_pool->mc_address, @@ -1223,32 +1202,50 @@ static int smu_free_memory_pool(struct smu_context *smu) memset(memory_pool, 0, sizeof(struct smu_table)); - return ret; + return 0; } -static int smu_hw_init(void *handle) +static int smu_start_smc_engine(struct smu_context *smu) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct amdgpu_device *adev = smu->adev; + int ret = 0; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (adev->asic_type < CHIP_NAVI10) { - ret = smu_load_microcode(smu); - if (ret) - return ret; + if (smu->ppt_funcs->load_microcode) { + ret = smu->ppt_funcs->load_microcode(smu); + if (ret) + return ret; + } } } - ret = smu_check_fw_status(smu); + if (smu->ppt_funcs->check_fw_status) { + ret = smu->ppt_funcs->check_fw_status(smu); + if (ret) + pr_err("SMC is not ready\n"); + } + + return ret; +} + +static int smu_hw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct smu_context *smu = &adev->smu; + + ret = smu_start_smc_engine(smu); if (ret) { - pr_err("SMC firmware status is not correct\n"); + pr_err("SMU is 
not ready yet!\n"); return ret; } if (adev->flags & AMD_IS_APU) { smu_powergate_sdma(&adev->smu, false); smu_powergate_vcn(&adev->smu, false); + smu_powergate_jpeg(&adev->smu, false); + smu_set_gfx_cgpg(&adev->smu, true); } if (!smu->pm_enabled) @@ -1291,6 +1288,11 @@ failed: return ret; } +static int smu_stop_dpms(struct smu_context *smu) +{ + return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); +} + static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1301,6 +1303,34 @@ static int smu_hw_fini(void *handle) if (adev->flags & AMD_IS_APU) { smu_powergate_sdma(&adev->smu, true); smu_powergate_vcn(&adev->smu, true); + smu_powergate_jpeg(&adev->smu, true); + } + + ret = smu_stop_thermal_control(smu); + if (ret) { + pr_warn("Failed to stop thermal control!\n"); + return ret; + } + + /* + * For custom pptable uploading, skip the DPM features + * disable process on Navi1x ASICs. + * - The gfx related features are under the control of + * the RLC on those ASICs. RLC reinitialization would be + * needed to reenable them, which costs much more + * effort. + * + * - SMU firmware can handle the DPM reenablement + * properly. + */ + if (!smu->uploading_custom_pp_table || + !((adev->asic_type >= CHIP_NAVI10) && + (adev->asic_type <= CHIP_NAVI12))) { + ret = smu_stop_dpms(smu); + if (ret) { + pr_warn("Failed to stop DPMs!\n"); + return ret; + } } kfree(table_context->driver_pptable); @@ -1344,13 +1374,16 @@ static int smu_suspend(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); + bool baco_feature_is_enabled = false; + + if (!(adev->flags & AMD_IS_APU)) + baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); ret = smu_system_features_control(smu, false); if (ret) return ret; - if (adev->in_gpu_reset && baco_feature_is_enabled) { + if (baco_feature_is_enabled) { ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); if (ret) { pr_warn("set BACO feature enabled failed, return %d\n", ret); @@ -1363,6 +1396,8 @@ if (adev->asic_type >= CHIP_NAVI10 && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); + if (smu->is_apu) + smu_set_gfx_cgpg(&adev->smu, false); return 0; } @@ -1375,7 +1410,11 @@ static int smu_resume(void *handle) pr_info("SMU is resuming...\n"); - mutex_lock(&smu->mutex); + ret = smu_start_smc_engine(smu); + if (ret) { + pr_err("SMU is not ready yet!\n"); + goto failed; + } ret = smu_smc_table_hw_init(smu, false); if (ret) @@ -1385,13 +1424,16 @@ if (ret) goto failed; - mutex_unlock(&smu->mutex); + if (smu->is_apu) + smu_set_gfx_cgpg(&adev->smu, true); + + smu->disable_uclk_switch = 0; pr_info("SMU is resumed successfully!\n"); return 0; + failed: - mutex_unlock(&smu->mutex); return ret; } @@ -1409,8 +1451,9 @@ int smu_display_configuration_change(struct smu_context *smu, mutex_lock(&smu->mutex); - smu_set_deep_sleep_dcefclk(smu, - display_config->min_dcef_deep_sleep_set_clk / 100); + if (smu->ppt_funcs->set_deep_sleep_dcefclk) + smu->ppt_funcs->set_deep_sleep_dcefclk(smu, + display_config->min_dcef_deep_sleep_set_clk / 100); for (index = 0; index < display_config->num_path_including_non_display; index++) { if (display_config->displays[index].controller_id != 0) @@ -1529,7 +1572,8 @@ static int smu_enable_umd_pstate(void *handle, struct smu_context *smu = (struct 
smu_context*)(handle); struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context) + + if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)) return -EINVAL; if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { @@ -1587,9 +1631,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d &soc_mask); if (ret) return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1653,7 +1697,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, workload = smu->workload_setting[index]; if (smu->power_profile_mode != workload) - smu_set_power_profile_mode(smu, &workload, 0); + smu_set_power_profile_mode(smu, &workload, 0, false); } return ret; @@ -1661,18 +1705,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id) + enum amd_pp_task task_id, + bool lock_needed) { int ret = 0; + if (lock_needed) + mutex_lock(&smu->mutex); + switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: ret = smu_pre_display_config_changed(smu); if (ret) - return ret; + goto out; ret = smu_set_cpu_power_state(smu); if (ret) - return ret; + goto out; ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: @@ -1683,6 +1731,10 @@ int smu_handle_task(struct smu_context *smu, break; } +out: + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -1715,7 +1767,7 @@ int smu_switch_power_profile(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - smu_set_power_profile_mode(smu, &workload, 0); + smu_set_power_profile_mode(smu, &workload, 0, false); mutex_unlock(&smu->mutex); @@ -1727,7 +1779,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); enum amd_dpm_forced_level level; - if (!smu_dpm_ctx->dpm_context) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; mutex_lock(&(smu->mutex)); @@ -1742,15 +1794,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int ret = 0; - if (!smu_dpm_ctx->dpm_context) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; + mutex_lock(&smu->mutex); + ret = smu_enable_umd_pstate(smu, &level); - if (ret) + if (ret) { + mutex_unlock(&smu->mutex); return ret; + } ret = smu_handle_task(smu, level, - AMD_PP_TASK_READJUST_POWER_STATE); + AMD_PP_TASK_READJUST_POWER_STATE, + false); + + mutex_unlock(&smu->mutex); return ret; } @@ -1766,6 +1825,143 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count) return ret; } +int smu_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask, + bool lock_needed) +{ + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int ret = 0; + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + pr_debug("force clock level is for dpm manual mode only.\n"); + return -EINVAL; + } + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs && 
smu->ppt_funcs->force_clk_levels) + ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + uint16_t msg; + int ret; + + /* + * The SMC is not fully ready. That may be + * expected as the IP may be masked. + * So, just return without error. + */ + if (!smu->pm_enabled) + return 0; + + mutex_lock(&smu->mutex); + + switch (mp1_state) { + case PP_MP1_STATE_SHUTDOWN: + msg = SMU_MSG_PrepareMp1ForShutdown; + break; + case PP_MP1_STATE_UNLOAD: + msg = SMU_MSG_PrepareMp1ForUnload; + break; + case PP_MP1_STATE_RESET: + msg = SMU_MSG_PrepareMp1ForReset; + break; + case PP_MP1_STATE_NONE: + default: + mutex_unlock(&smu->mutex); + return 0; + } + + /* some asics may not support those messages */ + if (smu_msg_get_index(smu, msg) < 0) { + mutex_unlock(&smu->mutex); + return 0; + } + + ret = smu_send_smc_msg(smu, msg); + if (ret) + pr_err("[PrepareMp1] Failed!\n"); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + int ret = 0; + + /* + * The SMC is not fully ready. That may be + * expected as the IP may be masked. + * So, just return without error. + */ + if (!smu->pm_enabled) + return 0; + + if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) + return 0; + + mutex_lock(&smu->mutex); + + ret = smu->ppt_funcs->set_df_cstate(smu, state); + if (ret) + pr_err("[SetDfCstate] failed!\n"); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_write_watermarks_table(struct smu_context *smu) +{ + int ret = 0; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = NULL; + + table = &smu_table->tables[SMU_TABLE_WATERMARKS]; + + if (!table->cpu_addr) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, + true); + + return ret; +} + +int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) +{ + struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; + void *table = watermarks->cpu_addr; + + mutex_lock(&smu->mutex); + + if (!smu->disable_watermark && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + smu_set_watermarks_table(smu, table, clock_ranges); + smu->watermarks_bitmap |= WATERMARKS_EXIST; + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + } + + mutex_unlock(&smu->mutex); + + return 0; +} + const struct amd_ip_funcs smu_ip_funcs = { .name = "smu", .early_init = smu_early_init, @@ -1802,3 +1998,582 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = .rev = 0, .funcs = &smu_ip_funcs, }; + +int smu_load_microcode(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->load_microcode) + ret = smu->ppt_funcs->load_microcode(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_check_fw_status(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->check_fw_status) + ret = smu->ppt_funcs->check_fw_status(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_gfx_cgpg) + ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_speed_rpm(struct 
smu_context *smu, uint32_t speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_speed_rpm) + ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool def, + bool lock_needed) +{ + int ret = 0; + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_power_limit) + ret = smu->ppt_funcs->get_power_limit(smu, limit, def); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_power_limit(struct smu_context *smu, uint32_t limit) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_power_limit) + ret = smu->ppt_funcs->set_power_limit(smu, limit); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->print_clk_levels) + ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_od_percentage) + ret = smu->ppt_funcs->get_od_percentage(smu, type); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_od_percentage) + ret = smu->ppt_funcs->set_od_percentage(smu, type, value); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->od_edit_dpm_table) + ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->read_sensor) + ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_power_profile_mode(struct smu_context *smu, char *buf) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_power_profile_mode) + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size, + bool lock_needed) +{ + int ret = 0; + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_get_fan_control_mode(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_control_mode) + ret = smu->ppt_funcs->get_fan_control_mode(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_control_mode(struct smu_context *smu, int value) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_control_mode) + ret = smu->ppt_funcs->set_fan_control_mode(smu, value); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t 
*speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_speed_percent) + ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_speed_percent) + ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_speed_rpm) + ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_deep_sleep_dcefclk) + ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_active_display_count(struct smu_context *smu, uint32_t count) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_active_display_count) + ret = smu->ppt_funcs->set_active_display_count(smu, count); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type(struct smu_context *smu, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type) + ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_max_high_clocks(struct smu_context *smu, + struct amd_pp_simple_clock_info *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_max_high_clocks) + ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type_with_latency) + ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type_with_voltage(struct smu_context *smu, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type_with_voltage) + ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request *clock_req) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->display_clock_voltage_request) + ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); + + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) +{ + int ret = -EINVAL; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->display_disable_memory_clock_switch) + ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_notify_smu_enable_pwe(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->notify_smu_enable_pwe) + ret = 
smu->ppt_funcs->notify_smu_enable_pwe(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_xgmi_pstate) + ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_azalia_d3_pme(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_azalia_d3_pme) + ret = smu->ppt_funcs->set_azalia_d3_pme(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +bool smu_baco_is_support(struct smu_context *smu) +{ + bool ret = false; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_is_support) + ret = smu->ppt_funcs->baco_is_support(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) +{ + if (!smu->ppt_funcs->baco_get_state) + return -EINVAL; + + mutex_lock(&smu->mutex); + *state = smu->ppt_funcs->baco_get_state(smu); + mutex_unlock(&smu->mutex); + + return 0; +} + +int smu_baco_enter(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_enter) + ret = smu->ppt_funcs->baco_enter(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_baco_exit(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_exit) + ret = smu->ppt_funcs->baco_exit(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->mode2_reset) + ret = smu->ppt_funcs->mode2_reset(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) + ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_uclk_dpm_states(struct smu_context *smu, + unsigned int *clock_values_in_khz, + unsigned int *num_states) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_uclk_dpm_states) + ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); + + mutex_unlock(&smu->mutex); + + return ret; +} + +enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) +{ + enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_current_power_state) + pm_state = smu->ppt_funcs->get_current_power_state(smu); + + mutex_unlock(&smu->mutex); + + return pm_state; +} + +int smu_get_dpm_clock_table(struct smu_context *smu, + struct dpm_clocks *clock_table) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_dpm_clock_table) + ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); + + mutex_unlock(&smu->mutex); + + return ret; +} + +uint32_t smu_get_pptable_power_limit(struct smu_context *smu) +{ + uint32_t ret = 0; + + if (smu->ppt_funcs->get_pptable_power_limit) + ret = smu->ppt_funcs->get_pptable_power_limit(smu); + + return ret; +} + +int smu_send_smc_msg(struct smu_context *smu, + enum smu_message_type msg) +{ + int ret; + + ret = smu_send_smc_msg_with_param(smu, msg, 0); + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index d493a3f8c07a..17eeb546c550 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -36,6 +37,12 @@ #include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" #include "nbio/nbio_7_4_sh_mask.h" +#include "amdgpu_xgmi.h" +#include <linux/i2c.h> +#include <linux/pci.h> +#include "amdgpu_ras.h" + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev #define CTF_OFFSET_EDGE 5 #define CTF_OFFSET_HOTSPOT 5 @@ -112,8 +119,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown), MSG_MAP(SoftReset, PPSMC_MSG_SoftReset), MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc), - MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc), - MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize), @@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = { TAB_MAP(SMU_METRICS), TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(OVERDRIVE), + TAB_MAP(I2C_COMMANDS), }; static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -273,10 +280,8 @@ static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER return -EINVAL; mapping = arcturus_workload_map[profile]; - if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU power source: %d\n", profile); + if (!(mapping.valid_mapping)) return -EINVAL; - } return mapping.map_to; } @@ -294,6 +299,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -485,6 +493,7 @@ static int arcturus_store_powerplay_table(struct smu_context *smu) { struct smu_11_0_powerplay_table *powerplay_table = NULL; struct smu_table_context *table_context = &smu->smu_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; int ret = 0; if (!table_context->power_play_table) @@ -497,6 +506,12 @@ static int arcturus_store_powerplay_table(struct smu_context *smu) table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + mutex_lock(&smu_baco->mutex); + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) + smu_baco->platform_support = true; + mutex_unlock(&smu_baco->mutex); + return ret; } @@ -528,9 +543,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu) return 0; } -static int arcturus_run_btc_afll(struct smu_context *smu) +static int arcturus_run_btc(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + int ret = 0; + + ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + if (ret) { + pr_err("RunAfllBtc failed!\n"); + return ret; + } + + return 
smu_send_smc_msg(smu, SMU_MSG_RunDcBtc); } static int arcturus_populate_umd_state_clk(struct smu_context *smu) @@ -610,12 +633,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu, return ret; } + /* + * For DPM disabled case, there will be only one clock level. + * And it's safe to assume that is always the current clock. + */ for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_MCLK: @@ -635,9 +663,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_SOCCLK: @@ -657,9 +686,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_FCLK: @@ -679,9 +709,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < single_dpm_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, single_dpm_table->dpm_levels[i].value, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; default: @@ -756,8 +787,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu, uint32_t soft_min_level, soft_max_level; int ret = 0; - mutex_lock(&(smu->mutex)); - soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? 
(fls(mask) - 1) : 0; @@ -792,91 +821,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu, break; case SMU_MCLK: - single_dpm_table = &(dpm_table->mem_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - - break; - case SMU_SOCCLK: - single_dpm_table = &(dpm_table->soc_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - - break; - case SMU_FCLK: - single_dpm_table = &(dpm_table->fclk_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - + /* + * Should not arrive here since Arcturus does not + * support mclk/socclk/fclk softmin/softmax settings + */ + ret = -EINVAL; break; default: break; } - mutex_unlock(&(smu->mutex)); return ret; } @@ -1043,7 +1000,7 @@ static int arcturus_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -1186,6 +1143,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) { struct arcturus_dpm_table *dpm_table = (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0); uint32_t soft_level; int ret = 0; @@ -1199,40 +1157,27 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) dpm_table->gfx_table.dpm_state.soft_max_level = dpm_table->gfx_table.dpm_levels[soft_level].value; - /* uclk */ - if (highest) - soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); - else - soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); - - 
dpm_table->mem_table.dpm_state.soft_min_level = - dpm_table->mem_table.dpm_state.soft_max_level = - dpm_table->mem_table.dpm_levels[soft_level].value; - - /* socclk */ - if (highest) - soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); - else - soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); - - dpm_table->soc_table.dpm_state.soft_min_level = - dpm_table->soc_table.dpm_state.soft_max_level = - dpm_table->soc_table.dpm_levels[soft_level].value; - - ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload boot level to %s!\n", highest ? "highest" : "lowest"); return ret; } - ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload dpm max level to %s!\n", highest ? "highest" : "lowest"); return ret; } + if (hive) + /* + * Force XGMI Pstate to highest or lowest + * TODO: revise this when xgmi dpm is functional + */ + ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0); + return ret; } @@ -1240,6 +1185,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu) { struct arcturus_dpm_table *dpm_table = (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0); uint32_t soft_min_level, soft_max_level; int ret = 0; @@ -1251,34 +1197,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu) dpm_table->gfx_table.dpm_state.soft_max_level = dpm_table->gfx_table.dpm_levels[soft_max_level].value; - /* uclk */ - soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); - soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); - dpm_table->mem_table.dpm_state.soft_min_level = - dpm_table->gfx_table.dpm_levels[soft_min_level].value; - dpm_table->mem_table.dpm_state.soft_max_level = - dpm_table->gfx_table.dpm_levels[soft_max_level].value; - - /* socclk */ - soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); - soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); - dpm_table->soc_table.dpm_state.soft_min_level = - dpm_table->soc_table.dpm_levels[soft_min_level].value; - dpm_table->soc_table.dpm_state.soft_max_level = - dpm_table->soc_table.dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload DPM Bootup Levels!"); return ret; } - ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload DPM Max Levels!"); return ret; } + if (hive) + /* + * Reset XGMI Pstate back to default + * TODO: revise this when xgmi dpm is functional + */ + ret = smu_v11_0_set_xgmi_pstate(smu, 0); + return ret; }
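Note: the force/unforce paths above use a single idiom -- a DPM level is "forced" by collapsing the soft min and soft max onto that level's value before re-uploading the table. A minimal sketch (the single-table type name is assumed from the surrounding code, not confirmed by this patch):

    /* Sketch only: pin one DPM level by clamping soft min == soft max. */
    static void pin_dpm_level(struct arcturus_single_dpm_table *t, uint32_t level)
    {
        t->dpm_state.soft_min_level = t->dpm_levels[level].value;
        t->dpm_state.soft_max_level = t->dpm_levels[level].value;
    }

@@ -1329,15 +1266,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu, static int arcturus_get_power_limit(struct smu_context *smu, uint32_t *limit, - bool asic_default) + bool cap) { PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t asic_default_power_limit = 0; int ret = 0; int power_src; - if (!smu->default_power_limit || - !smu->power_limit) { + if (!smu->power_limit) { if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); if (power_src < 0) @@ -1360,17 +1296,11 @@ static int 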
arcturus_get_power_limit(struct smu_context *smu, pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } - if (smu->od_enabled) { - asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit); - asic_default_power_limit /= 100; - } - - smu->default_power_limit = asic_default_power_limit; smu->power_limit = asic_default_power_limit; } - if (asic_default) - *limit = smu->default_power_limit; + if (cap) + *limit = smu_v11_0_get_max_power_limit(smu); else *limit = smu->power_limit; @@ -1388,12 +1318,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, "VR", "COMPUTE", "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)"}; uint32_t i, size = 0; int16_t workload_type = 0; if (!smu->pm_enabled || !buf) return -EINVAL; + size += sprintf(buf + size, "%16s\n", + title[0]); + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT @@ -1891,6 +1826,260 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } +static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1); + if (ret) { + pr_err("[EnableVCNDPM] failed!\n"); + return ret; + } + } + power_gate->vcn_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0); + if (ret) { + pr_err("[DisableVCNDPM] failed!\n"); + return ret; + } + } + power_gate->vcn_gated = true; + } + + return ret; +} + + +static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write, + uint8_t address, uint32_t numbytes, + uint8_t *data) +{ + int i; + + BUG_ON(numbytes > MAX_SW_I2C_COMMANDS); + + req->I2CcontrollerPort = 0; + req->I2CSpeed = 2; + req->SlaveAddress = address; + req->NumCmds = numbytes; + + for (i = 0; i < numbytes; i++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[i]; + + /* First 2 bytes are always write for lower 2b EEPROM address */ + if (i < 2) + cmd->Cmd = 1; + else + cmd->Cmd = write; + + + /* Add RESTART for read after address filled */ + cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0; + + /* Add STOP in the end */ + cmd->CmdConfig |= (i == (numbytes - 1)) ? 
CMDCONFIG_STOP_MASK : 0; + + /* Fill with data regardless if read or write to simplify code */ + cmd->RegisterAddr = data[i]; + } +} + +static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t i, ret = 0; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS]; + + memset(&req, 0, sizeof(req)); + arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + /* Now read data starting with that address */ + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, + true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr; + + /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */ + for (i = 0; i < numbytes; i++) + data[i] = res->SwI2cCmds[i].Data; + + pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + } else + pr_err("arcturus_i2c_eeprom_read_data - error occurred :%x", ret); + + return ret; +} + +static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t ret; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + + memset(&req, 0, sizeof(req)); + arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + /* + * According to EEPROM spec there is a MAX of 10 ms required for + * EEPROM to flush internal RX buffer after STOP was issued at the + * end of write transaction. During this time the EEPROM will not be + * responsive to any more commands - so wait a bit more. 
+ */ + msleep(10); + + } else + pr_err("arcturus_i2c_write - error occurred :%x", ret); + + return ret; +} + +static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0; + uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 }; + + for (i = 0; i < num; i++) { + /* + * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at + * once and hence the data needs to be split into chunks and sent each + * chunk separately + */ + data_size = msgs[i].len - 2; + data_chunk_size = MAX_SW_I2C_COMMANDS - 2; + next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff); + data_ptr = msgs[i].buf + 2; + + for (j = 0; j < data_size / data_chunk_size; j++) { + /* Insert the EEPROM dest address, bits 0-15 */ + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = arcturus_i2c_eeprom_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + + memcpy(data_ptr, data_chunk + 2, data_chunk_size); + } else { + + memcpy(data_chunk + 2, data_ptr, data_chunk_size); + + ret = arcturus_i2c_eeprom_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + } + + if (ret) { + num = -EIO; + goto fail; + } + + next_eeprom_addr += data_chunk_size; + data_ptr += data_chunk_size; + } + + if (data_size % data_chunk_size) { + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = arcturus_i2c_eeprom_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + + memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size); + } else { + memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size); + + ret = arcturus_i2c_eeprom_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + } + + if (ret) { + num = -EIO; + goto fail; + } + } + } + +fail: + return num; +} + +static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + + +static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = { + .master_xfer = arcturus_i2c_eeprom_i2c_xfer, + .functionality = arcturus_i2c_eeprom_i2c_func, +}; + +static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int res; + + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &arcturus_i2c_eeprom_i2c_algo; + snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + + res = i2c_add_adapter(control); + if (res) + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + + return res; +} + +static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) +{ + i2c_del_adapter(control); +} + +static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + + return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; +} +
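Note: the chunking above reserves the first two bytes of every request for the big-endian EEPROM offset, so each SwI2cRequest_t carries at most MAX_SW_I2C_COMMANDS - 2 payload bytes. A worked sketch of the arithmetic (the value 24 for MAX_SW_I2C_COMMANDS is an assumption for illustration only):

    /* Sketch only: carving a 300-byte payload into SMU i2c requests,
     * assuming MAX_SW_I2C_COMMANDS == 24. */
    uint32_t chunk = 24 - 2;      /* 22 payload bytes per request */
    uint32_t full  = 300 / chunk; /* 13 full requests, moving 286 bytes */
    uint32_t rem   = 300 % chunk; /* 14 bytes in one final, short request */
    /* Each request is re-prefixed with the running next_eeprom_addr. */

static const struct pptable_funcs arcturus_ppt_funcs = { /* translate smu index into arcturus specific index */ .get_smu_msg_index = arcturus_get_smu_msg_index, @@ -1909,7 +2098,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { /* btc */ - .run_afll_btc = 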
arcturus_run_btc_afll, + .run_btc = arcturus_run_btc, /* dpm/clk tables */ .set_default_dpm_table = arcturus_set_default_dpm_table, .populate_umd_state_clk = arcturus_populate_umd_state_clk, @@ -1929,12 +2118,62 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, + .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable, + .i2c_eeprom_init = arcturus_i2c_eeprom_control_init, + .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_enter = smu_v11_0_baco_enter, + .baco_exit = smu_v11_0_baco_exit, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, + .get_pptable_power_limit = arcturus_get_pptable_power_limit, }; void arcturus_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &arcturus_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile 
b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index cc63705920dc..2773966ae434 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \ pp_overdriver.o smu_helper.o \ vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \ vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \ - vega12_baco.o smu9_baco.o + vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \ + ci_baco.o smu7_baco.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c new file mode 100644 index 000000000000..3be40114e63d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c @@ -0,0 +1,195 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "ci_baco.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, 
CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 
BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK) + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_DELAY_MS, 0, 0, 0, 20, 0 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 } +}; + +int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +}
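Note: the tables above drive a small interpreter (baco_cmd_handler, invoked via baco_program_registers); for a CMD_READMODIFYWRITE entry the implied register update is a masked field write. A minimal sketch under that assumption, with reg_read()/reg_write() standing in for the driver's MMIO accessors (hypothetical helpers, not part of this patch):

    /* Sketch only: apply one CMD_READMODIFYWRITE baco_cmd_entry. */
    static void baco_rmw_example(u32 (*reg_read)(u32),
                                 void (*reg_write)(u32, u32),
                                 const struct baco_cmd_entry *e)
    {
        u32 v = reg_read(e->reg_offset);

        v &= ~e->mask;                       /* clear the field */
        v |= (e->val << e->shift) & e->mask; /* program the new value */
        reg_write(e->reg_offset, v);
    }

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h new file mode 100644 index 000000000000..17041f187020 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 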
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __CI_BACO_H__ +#define __CI_BACO_H__ +#include "smu7_baco.h" + +extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c index 9c57c1f67749..1c73776bd606 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c @@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m return ret; } +bool baco_program_registers(struct pp_hwmgr *hwmgr, + const struct baco_cmd_entry *entry, + const u32 array_size) +{ + u32 i, reg = 0; + + for (i = 0; i < array_size; i++) { + if ((entry[i].cmd == CMD_WRITE) || + (entry[i].cmd == CMD_READMODIFYWRITE) || + (entry[i].cmd == CMD_WAITFOR)) + reg = entry[i].reg_offset; + if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask, + entry[i].shift, entry[i].val, entry[i].timeout)) + return false; + } + + return true; +} + bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, const struct soc15_baco_cmd_entry *entry, const u32 array_size) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h index 95296c916f4e..8393eb62706d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h @@ -33,6 +33,15 @@ enum baco_cmd_type { CMD_DELAY_US, }; +struct baco_cmd_entry { + enum baco_cmd_type cmd; + uint32_t reg_offset; + uint32_t mask; + uint32_t shift; + uint32_t timeout; + uint32_t val; +}; + struct soc15_baco_cmd_entry { enum baco_cmd_type cmd; uint32_t hwip; @@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry { uint32_t timeout; uint32_t val; }; + +extern bool baco_program_registers(struct pp_hwmgr *hwmgr, + const struct baco_cmd_entry *entry, + const u32 array_size); extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, const struct soc15_baco_cmd_entry *entry, const u32 array_size); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c new file mode 100644 index 000000000000..c0368f2dfb21 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "fiji_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 
CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 } +}; + +static const struct baco_cmd_entry clk_req_b_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } 
+}; + +#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK) + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 } +}; + +int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +}
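Note: the CMD_WAITFOR entries above pair a mask with an expected value and a retry budget in the timeout field; the implied semantics are "poll until (reg & mask) == val, give up after timeout tries". A minimal sketch under that assumption (reg_read() is a hypothetical MMIO accessor, and the pacing delay is illustrative):

    /* Sketch only: the polling implied by a CMD_WAITFOR baco_cmd_entry. */
    static bool baco_waitfor_example(u32 (*reg_read)(u32),
                                     const struct baco_cmd_entry *e)
    {
        u32 tries;

        for (tries = 0; tries < e->timeout; tries++) {
            if ((reg_read(e->reg_offset) & e->mask) == e->val)
                return true;
            udelay(10); /* illustrative pacing; real code may differ */
        }
        return false;
    }

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h new file mode 100644 index 000000000000..47f402900bdb --- /dev/null +++ 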
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __FIJI_BACO_H__ +#define __FIJI_BACO_H__ +#include "smu7_baco.h" + +extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index cc57fb953e62..253860d30b20 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -81,8 +81,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) adev = hwmgr->adev; /* Skip for suspend/resume case */ - if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) - && adev->in_suspend) { + if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr) + && !amdgpu_passthrough(adev) && adev->in_suspend) { pr_info("dpm has been enabled\n"); return 0; } @@ -200,6 +200,9 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); + if (!hwmgr->not_vf) + return 0; + if (hwmgr->hwmgr_func->stop_thermal_controller == NULL) return -EINVAL; @@ -237,6 +240,9 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) TEMP_RANGE_MAX}; struct amdgpu_device *adev = hwmgr->adev; + if (!hwmgr->not_vf) + return 0; + if (hwmgr->hwmgr_func->get_thermal_temperature_range) hwmgr->hwmgr_func->get_thermal_temperature_range( hwmgr, &range); @@ -263,6 +269,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); + if (hwmgr->pp_one_vf) + return false; if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) return false; @@ -482,6 +490,9 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); + if (!hwmgr->not_vf) + return 0; + if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index a24beaa4fb01..e2b82c902948 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) int hwmgr_early_init(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev; + if (!hwmgr) return -EINVAL; @@ -94,8 +96,11 @@ int 
hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr_init_workload_prority(hwmgr); hwmgr->gfxoff_state_changed_by_workload = false; + adev = hwmgr->adev; + switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CI: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &ci_smu_funcs; ci_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | @@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) smu7_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_CZ: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu8_smu_funcs; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; smu8_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_TOPAZ: @@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_AI: switch (hwmgr->chip_id) { case CHIP_VEGA10: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); @@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) vega12_hwmgr_init(hwmgr); break; case CHIP_VEGA20: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega20_smu_funcs; vega20_hwmgr_init(hwmgr); @@ -212,6 +221,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) { int ret = 0; + hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev); + hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf)) + ? true : false; if (!hwmgr->pm_en) return 0; @@ -270,6 +282,9 @@ err: int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) { + if (hwmgr && !hwmgr->not_vf) + return 0; + if (!hwmgr || !hwmgr->pm_en) return 0; @@ -290,6 +305,9 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr) { int ret = 0; + if (hwmgr && !hwmgr->not_vf) + return 0; + if (!hwmgr || !hwmgr->pm_en) return 0; @@ -309,6 +327,9 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) { int ret = 0; + if (hwmgr && !hwmgr->not_vf) + return 0; + if (!hwmgr) return -EINVAL; @@ -356,6 +377,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + if (!hwmgr->not_vf) + return ret; ret = phm_pre_display_configuration_changed(hwmgr); if (ret) return ret; @@ -372,6 +395,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, enum PP_StateUILabel requested_ui_label; struct pp_power_state *requested_ps = NULL; + if (!hwmgr->not_vf) + return ret; if (user_state == NULL) { ret = -EINVAL; break;
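Note: the guards added above all encode the same SR-IOV policy -- a plain virtual function must not touch power-management hardware, while bare metal (not_vf) and the single VF that owns PM (pp_one_vf) may proceed. A minimal sketch of the predicate (hypothetical helper, not part of this patch):

    /* Sketch only: may this hwmgr instance program PM hardware? */
    static bool hwmgr_pm_allowed(struct pp_hwmgr *hwmgr)
    {
        if (!hwmgr)
            return false;
        return hwmgr->not_vf || hwmgr->pp_one_vf;
    }

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c new file mode 100644 index 000000000000..8f8e296f2fe9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c @@ -0,0 +1,222 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 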
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "polaris_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_11_0_d.h" +#include "dce/dce_11_0_sh_mask.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 }, + { CMD_READMODIFYWRITE, 
mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 }, + { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } +}; + +static const struct baco_cmd_entry clk_req_b_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 
0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl_vg[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl_vg[] = +{ + { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } +}; + +int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + if (hwmgr->chip_id == CHIP_VEGAM) { + baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg)); + baco_program_registers(hwmgr, turn_off_plls_tbl_vg, + ARRAY_SIZE(turn_off_plls_tbl_vg)); + } else { + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); +
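/* the clk_req_b and BACO entry sequences below are shared with VEGAM */ +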
} + baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h new file mode 100644 index 000000000000..87a5fa0a157a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __POLARIS_BACO_H__ +#define __POLARIS_BACO_H__ +#include "smu7_baco.h" + +extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 6bf48934fdc4..31a32a79cfc2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -262,20 +262,22 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set uint32_t index; long workload; - if (!skip_display_settings) - phm_display_configuration_changed(hwmgr); - - if (hwmgr->ps) - power_state_management(hwmgr, new_ps); - else - /* - * for vega12/vega20 which does not support power state manager - * DAL clock limits should also be honoured - */ - phm_apply_clock_adjust_rules(hwmgr); - - if (!skip_display_settings) - phm_notify_smc_display_config_after_ps_adjustment(hwmgr); + if (hwmgr->not_vf) { + if (!skip_display_settings) + phm_display_configuration_changed(hwmgr); + + if (hwmgr->ps) + power_state_management(hwmgr, new_ps); + else + /* + * for vega12/vega20 which does not support power state manager + * DAL clock limits should also be honoured + */ + phm_apply_clock_adjust_rules(hwmgr); + + if (!skip_display_settings) + phm_notify_smc_display_config_after_ps_adjustment(hwmgr); + } if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) hwmgr->dpm_level = hwmgr->request_dpm_level; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 1115761982a7..4e8ab139bb3b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1151,12 +1151,11 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, struct smu10_hwmgr *data = hwmgr->backend; struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; Watermarks_t *table = &(data->water_marks_table); - int result = 0; smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges); smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false); data->water_marks_exist = true; - return result; + return 0; } static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c new file mode 100644 index 000000000000..044cda005aed --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smu7_baco.h" +#include "tonga_baco.h" +#include "fiji_baco.h" +#include "polaris_baco.h" +#include "ci_baco.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + *cap = false; + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) + return 0; + + reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); + + if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) + *cap = true; + + return 0; +} + +int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + reg = RREG32(mmBACO_CNTL); + + if (reg & BACO_CNTL__BACO_MODE_MASK) + /* gfx has already entered BACO state */ + *state = BACO_STATE_IN; + else + *state = BACO_STATE_OUT; + return 0; +} + +int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + case CHIP_TONGA: + return tonga_baco_set_state(hwmgr, state); + case CHIP_FIJI: + return fiji_baco_set_state(hwmgr, state); + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + return polaris_baco_set_state(hwmgr, state); +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + return ci_baco_set_state(hwmgr, state); +#endif + default: + return -EINVAL; + } +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h new file mode 100644 index 000000000000..be0d98abb536 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU7_BACO_H__ +#define __SMU7_BACO_H__ +#include "hwmgr.h" +#include "common_baco.h" + +extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); +extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 34f95e0e3ea4..d70abada66bf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -48,6 +48,7 @@ #include "smu7_clockpowergating.h" #include "processpptables.h" #include "pp_thermal.h" +#include "smu7_baco.h" #include "ivsrcid/ivsrcid_vislands30.h" @@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_voltage_lookup_table *lookup_table) { uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; table_size = lookup_table->count; PP_ASSERT_WITH_CODE(0 != lookup_table->count, @@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, for (j = i + 1; j > 0; j--) { if (lookup_table->entries[j].us_vdd < lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; + swap(lookup_table->entries[j - 1], + lookup_table->entries[j]); } } } @@ -3478,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) { + struct amdgpu_device *adev = hwmgr->adev; int i; u32 tmp = 0; if (!query) return -EINVAL; - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); - tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - *query = tmp; + /* + * PPSMC_MSG_GetCurrPkgPwr is not supported on: + * - Hawaii + * - Bonaire + * - Fiji + * - Tonga + */ + if ((adev->asic_type != CHIP_HAWAII) && + (adev->asic_type != CHIP_BONAIRE) && + (adev->asic_type != CHIP_FIJI) && + (adev->asic_type != CHIP_TONGA)) { + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); + tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *query = tmp; - if (tmp != 0) - return 0; + if (tmp != 0) + return 0; + } smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, @@ -3970,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. 
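+ * The flag is checked by smu7_update_avfs() right below.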
+ */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + tmp_result = smu7_update_avfs(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update avfs voltages!", @@ -4219,7 +4238,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t vbios_version; uint32_t tmp; /* Read MC indirect register offset 0x9F bits [3:0] to see @@ -4228,7 +4246,6 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) */ smu7_get_mc_microcode_version(hwmgr); - vbios_version = hwmgr->microcode_version_info.MC & 0xf; data->need_long_memory_training = false; @@ -4926,7 +4943,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); - len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); + len = ARRAY_SIZE(smu7_profiling); for (i = 0; i < len; i++) { if (i == hwmgr->power_profile_mode) { @@ -5058,13 +5075,11 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw PHM_PerformanceLevel *level) { const struct smu7_power_state *ps; - struct smu7_hwmgr *data; uint32_t i; if (level == NULL || hwmgr == NULL || state == NULL) return -EINVAL; - data = hwmgr->backend; ps = cast_const_phw_smu7_power_state(state); i = index > ps->performance_level_count - 1 ? @@ -5145,6 +5160,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, + .get_asic_baco_capability = smu7_baco_get_capability, + .get_asic_baco_state = smu7_baco_get_state, + .set_asic_baco_state = smu7_baco_set_state, .power_off_asic = smu7_power_off_asic, }; @@ -5167,13 +5185,11 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, int smu7_init_function_pointers(struct pp_hwmgr *hwmgr) { - int ret = 0; - hwmgr->hwmgr_func = &smu7_hwmgr_funcs; if (hwmgr->pp_table_version == PP_TABLE_V0) hwmgr->pptable_func = &pptable_funcs; else if (hwmgr->pp_table_version == PP_TABLE_V1) hwmgr->pptable_func = &pptable_v1_0_funcs; - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c new file mode 100644 index 000000000000..ea743bea8e29 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c @@ -0,0 +1,231 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "tonga_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 
ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, 
BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +static const struct baco_cmd_entry gpio_tbl_iceland[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff } +}; + +static const struct baco_cmd_entry exit_baco_tbl_iceland[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_DELAY_MS, 0, 0, 0, 20, 0 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl_iceland[] = +{ + { 
CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + if (hwmgr->chip_id == CHIP_TOPAZ) + baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland)); + else + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (hwmgr->chip_id == CHIP_TOPAZ) { + if (baco_program_registers(hwmgr, exit_baco_tbl_iceland, + ARRAY_SIZE(exit_baco_tbl_iceland))) { + if (baco_program_registers(hwmgr, clean_baco_tbl_iceland, + ARRAY_SIZE(clean_baco_tbl_iceland))) + return 0; + } + } else { + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h new file mode 100644 index 000000000000..5dc16cc8a295 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __TONGA_BACO_H__ +#define __TONGA_BACO_H__ +#include "smu7_baco.h" + +extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d08493b67b67..148446570e21 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_voltage_lookup_table *lookup_table) { uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count, "Lookup table is empty", return -EINVAL); @@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, for (j = i + 1; j > 0; j--) { if (lookup_table->entries[j].us_vdd < lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; + swap(lookup_table->entries[j - 1], + lookup_table->entries[j]); } } } @@ -914,6 +912,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.clockStep.memoryClock = 500; data->total_active_cus = adev->gfx.cu_info.number; + if (!hwmgr->not_vf) + return result; + /* Setup default Overdrive Fan control settings */ data->odn_fan_table.target_fan_speed = hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; @@ -981,6 +982,9 @@ static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr) static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) { + if (!hwmgr->not_vf) + return 0; + PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr), "Failed to init sclk threshold!", return -EINVAL); @@ -2505,6 +2509,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to setup default DPM tables!", return result); + if (!hwmgr->not_vf) + return 0; + /* initialize ODN table */ if (hwmgr->od_enabled) { if (odn_table->max_vddc) { @@ -2828,6 +2835,8 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap) struct vega10_hwmgr *data = hwmgr->backend; uint32_t i, feature_mask = 0; + if (!hwmgr->not_vf) + return 0; if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, @@ -2934,61 +2943,73 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = hwmgr->backend; int tmp_result, result = 0; - vega10_enable_disable_PCC_limit_feature(hwmgr, true); + if (hwmgr->not_vf) { + vega10_enable_disable_PCC_limit_feature(hwmgr, true); - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); - - tmp_result = vega10_construct_voltage_tables(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to construct voltage tables!", - result = tmp_result); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); - tmp_result = vega10_init_smc_table(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to initialize SMC table!", - result = tmp_result); + tmp_result = vega10_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to construct voltage tables!", + result = tmp_result); + } - if (PP_CAP(PHM_PlatformCaps_ThermalController)) { - tmp_result = vega10_enable_thermal_protection(hwmgr); + if (hwmgr->not_vf || hwmgr->pp_one_vf) { + 
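/* unlike the not_vf-only steps above, SMC table setup also runs for pp_one_vf */ +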
tmp_result = vega10_init_smc_table(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable thermal protection!", - result = tmp_result); + "Failed to initialize SMC table!", + result = tmp_result); } - tmp_result = vega10_enable_vrhot_feature(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable VR hot feature!", - result = tmp_result); + if (hwmgr->not_vf) { + if (PP_CAP(PHM_PlatformCaps_ThermalController)) { + tmp_result = vega10_enable_thermal_protection(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable thermal protection!", + result = tmp_result); + } - tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable deep sleep master switch!", - result = tmp_result); + tmp_result = vega10_enable_vrhot_feature(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable VR hot feature!", + result = tmp_result); - tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to start DPM!", result = tmp_result); + tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable deep sleep master switch!", + result = tmp_result); + } + + if (hwmgr->not_vf) { + tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to start DPM!", result = tmp_result); + } - /* enable didt, do not abort if failed didt */ - tmp_result = vega10_enable_didt_config(hwmgr); - PP_ASSERT(!tmp_result, - "Failed to enable didt config!"); + if (hwmgr->not_vf) { + /* enable didt, do not abort if failed didt */ + tmp_result = vega10_enable_didt_config(hwmgr); + PP_ASSERT(!tmp_result, + "Failed to enable didt config!"); + } tmp_result = vega10_enable_power_containment(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable power containment!", - result = tmp_result); + "Failed to enable power containment!", + result = tmp_result); - tmp_result = vega10_power_control_set_level(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to power control set level!", - result = tmp_result); + if (hwmgr->not_vf) { + tmp_result = vega10_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to power control set level!", + result = tmp_result); - tmp_result = vega10_enable_ulv(hwmgr); - PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to enable ULV!", - result = tmp_result); + tmp_result = vega10_enable_ulv(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable ULV!", + result = tmp_result); + } return result; } @@ -3082,11 +3103,22 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, performance_level->soc_clock = socclk_dep_table->entries [state_entry->ucSocClockIndexHigh].ulClk; if (gfxclk_dep_table->ucRevId == 0) { - performance_level->gfx_clock = gfxclk_dep_table->entries - [state_entry->ucGfxClockIndexHigh].ulClk; + /* under vega10 pp one vf mode, the gfx clk dpm needs to be lowered + * to level 4 due to the limited 110 W power budget + */ + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) + performance_level->gfx_clock = + gfxclk_dep_table->entries[4].ulClk; + else + performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucGfxClockIndexHigh].ulClk; } else if (gfxclk_dep_table->ucRevId == 1) { patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) + 
performance_level->gfx_clock = patom_record_V2[4].ulClk; + else + performance_level->gfx_clock = + patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; } performance_level->mem_clock = mclk_dep_table->entries @@ -3497,6 +3529,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinGfxclkByIndex, data->smc_state_table.gfx_boot_level); + data->dpm_table.gfx_table.dpm_state.soft_min_level = data->smc_state_table.gfx_boot_level; } @@ -3520,6 +3553,9 @@ int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) } } + if (!hwmgr->not_vf) + return 0; + if (!data->registry_data.socclk_dpm_key_disabled) { if (data->smc_state_table.soc_boot_level != data->dpm_table.soc_table.dpm_state.soft_min_level) { @@ -3562,6 +3598,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) } } + if (!hwmgr->not_vf) + return 0; + if (!data->registry_data.socclk_dpm_key_disabled) { if (data->smc_state_table.soc_max_level != data->dpm_table.soc_table.dpm_state.soft_max_level) { @@ -3691,6 +3730,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr); /* @@ -4049,15 +4095,25 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { *mclk_mask = 0; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; + /* under vega10 pp one vf mode, the gfx clk dpm needs to be lowered + * to level 4 due to the limited power budget + */ + if (hwmgr->pp_one_vf) + *sclk_mask = 4; + else + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; *soc_mask = table_info->vdd_dep_on_socclk->count - 1; *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; } + return 0; } static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { + if (!hwmgr->not_vf) + return; + switch (mode) { case AMD_FAN_CTRL_NONE: vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); @@ -4171,6 +4227,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, break; } + if (!hwmgr->not_vf) + return ret; + if (!ret) { if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); @@ -4355,14 +4414,13 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = hwmgr->backend; struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range; Watermarks_t *table = &(data->smc_state_table.water_marks_table); - int result = 0; if (!data->registry_data.disable_water_mark) { smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); data->water_marks_bitmap = WaterMarksExist; } - return result; + return 0; } static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) @@ -4475,7 +4533,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; - int i, now, size = 0; + int i, now, size = 0, count = 0; switch (type) { case PP_SCLK: @@ -4485,7 +4543,12 @@ static int 
vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); now = smum_get_argument(hwmgr); - for (i = 0; i < sclk_table->count; i++) + if (hwmgr->pp_one_vf && + (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) + count = 5; + else + count = sclk_table->count; + for (i = 0; i < count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); @@ -4696,6 +4759,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; + if (!hwmgr->not_vf) + return 0; + if (PP_CAP(PHM_PlatformCaps_ThermalController)) vega10_disable_thermal_protection(hwmgr); @@ -5098,9 +5164,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; - for (i = 0; i < podn_vdd_dep->count - 1; i++) - od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; - if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc) + for (i = 0; i < podn_vdd_dep->count; i++) od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; @@ -5249,13 +5313,11 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ PHM_PerformanceLevel *level) { const struct vega10_power_state *ps; - struct vega10_hwmgr *data; uint32_t i; if (level == NULL || hwmgr == NULL || state == NULL) return -EINVAL; - data = hwmgr->backend; ps = cast_const_phw_vega10_power_state(state); i = index > ps->performance_level_count - 1 ? @@ -5267,6 +5329,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return 0; } +static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable) +{ + struct vega10_hwmgr *data = hwmgr->backend; + uint32_t feature_mask = 0; + + if (disable) { + feature_mask |= data->smu_features[GNLD_ULV].enabled ? + data->smu_features[GNLD_ULV].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ? + data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ? + data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ? + data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ? + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0; + } else { + feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ? + data->smu_features[GNLD_ULV].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ? + data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ? + data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ? + data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ? 
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0; + } + + if (feature_mask) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, + !disable, feature_mask), + "Failed to enable/disable power features for compute performance!", + return -EINVAL); + + if (disable) { + data->smu_features[GNLD_ULV].enabled = false; + data->smu_features[GNLD_DS_GFXCLK].enabled = false; + data->smu_features[GNLD_DS_SOCCLK].enabled = false; + data->smu_features[GNLD_DS_LCLK].enabled = false; + data->smu_features[GNLD_DS_DCEFCLK].enabled = false; + } else { + data->smu_features[GNLD_ULV].enabled = true; + data->smu_features[GNLD_DS_GFXCLK].enabled = true; + data->smu_features[GNLD_DS_SOCCLK].enabled = true; + data->smu_features[GNLD_DS_LCLK].enabled = true; + data->smu_features[GNLD_DS_DCEFCLK].enabled = true; + } + + return 0; + +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -5334,6 +5449,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_ppfeature_status = vega10_get_ppfeature_status, .set_ppfeature_status = vega10_set_ppfeature_status, .set_mp1_state = vega10_set_mp1_state, + .disable_power_features_for_compute_performance = + vega10_disable_power_features_for_compute_performance, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 6f26cb241ecc..0a677d4bc87b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -1343,6 +1343,9 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) hwmgr->default_power_limit = hwmgr->power_limit = (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); + if (!hwmgr->not_vf) + return 0; + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { if (data->smu_features[GNLD_PPT].supported) PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 7af9ad450ac4..aca61d1ff3c2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -499,8 +499,6 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, PPCLK_e clkID, uint32_t index, uint32_t *clock) { - int result = 0; - /* *SMU expects the Clock ID to be in the top 16 bits. 
*Lower 16 bits specify the level @@ -512,7 +510,7 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, *clock = smum_get_argument(hwmgr); - return result; + return 0; } static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c index df6ff9252401..9b5e72bdceca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c @@ -29,7 +29,7 @@ #include "vega20_baco.h" #include "vega20_smumgr.h" - +#include "amdgpu_ras.h" static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { @@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum BACO_STATE cur_state; uint32_t data; @@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) return 0; if (state == BACO_STATE_IN) { - data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); - data |= 0x80000000; - WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - - - if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) - return -EINVAL; + if (!ras || !ras->supported) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + + if(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnterBaco, 0)) + return -EINVAL; + } else { + if(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnterBaco, 1)) + return -EINVAL; + } } else if (state == BACO_STATE_OUT) { if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f5915308e643..5bcf0d684151 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TablelessHardwareInterface); phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_BACO); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableSMU7ThermalManagement); if (adev->pg_flags & AMD_PG_SUPPORT_UVD) @@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) "Failed to init sclk threshold!", return ret); - if (adev->in_baco_reset) { - adev->in_baco_reset = 0; + if (adev->in_gpu_reset && + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) { ret = vega20_baco_apply_vdci_flush_workaround(hwmgr); if (ret) @@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire) return res; } +static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr, + enum pp_df_cstate state) +{ + int ret; + + /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ + if (hwmgr->smu_version < 0x283200) { + pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); + return -EINVAL; + } + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state); + if (ret) + pr_err("SetDfCstate failed!\n"); + + return ret; +} + +static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, + uint32_t pstate) +{ + int ret; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetXgmiMode, + pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + if (ret) + pr_err("SetXgmiPstate failed!\n"); + + return ret; +} + static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* init/fini related */ .backend_init = vega20_hwmgr_backend_init, @@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .set_asic_baco_state = vega20_baco_set_state, .set_mp1_state = vega20_set_mp1_state, .smu_i2c_bus_access = vega20_smu_i2c_bus_access, + .set_df_cstate = vega20_set_df_cstate, + .set_xgmi_pstate = vega20_set_xgmi_pstate, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 23171a4d9a31..ca3fdc6777cf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -259,10 +259,8 @@ struct smu_table_context struct smu_bios_boot_up_values boot_values; void *driver_pptable; struct smu_table *tables; - uint32_t table_count; struct smu_table memory_pool; uint8_t thermal_controller_type; - uint16_t TDPODLimit; void *overdrive_table; }; @@ -284,6 +282,7 @@ struct smu_power_gate { bool uvd_gated; bool vce_gated; bool vcn_gated; + bool jpeg_gated; }; struct smu_power_context { @@ -322,6 +321,13 @@ struct mclock_latency_table { struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; }; +enum smu_reset_mode +{ + SMU_RESET_MODE_0, + SMU_RESET_MODE_1, + SMU_RESET_MODE_2, +}; + enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, @@ -341,7 +347,6 @@ struct smu_context struct amdgpu_device *adev; struct amdgpu_irq_src *irq_source; - const struct smu_funcs *funcs; const struct pptable_funcs *ppt_funcs; struct mutex mutex; struct mutex sensor_lock; @@ -382,11 +387,15 @@ struct smu_context uint32_t power_profile_mode; uint32_t default_power_profile_mode; bool pm_enabled; + bool is_apu; uint32_t smc_if_version; + bool uploading_custom_pp_table; }; +struct i2c_adapter; + struct pptable_funcs { int (*alloc_dpm_context)(struct smu_context *smu); int (*store_powerplay_table)(struct smu_context *smu); @@ -398,7 +407,7 @@ struct pptable_funcs { int (*get_smu_table_index)(struct smu_context *smu, uint32_t index); int (*get_smu_power_index)(struct smu_context *smu, uint32_t index); int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile); - int (*run_afll_btc)(struct smu_context *smu); + int (*run_btc)(struct smu_context *smu); int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu); int (*set_default_dpm_table)(struct smu_context *smu); @@ -428,6 +437,7 @@ struct pptable_funcs { int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable); int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable); + int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable); int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size); int (*pre_display_config_changed)(struct smu_context *smu); @@ -459,17 +469,19 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); void (*dump_pptable)(struct smu_context *smu); int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default); - int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max); -}; - -struct smu_funcs -{ + int 
(*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t dpm_level, uint32_t *freq); + int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); + int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap); + int (*i2c_eeprom_init)(struct i2c_adapter *control); + void (*i2c_eeprom_fini)(struct i2c_adapter *control); + int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table); int (*init_microcode)(struct smu_context *smu); + int (*load_microcode)(struct smu_context *smu); int (*init_smc_tables)(struct smu_context *smu); int (*fini_smc_tables)(struct smu_context *smu); int (*init_power)(struct smu_context *smu); int (*fini_power)(struct smu_context *smu); - int (*load_microcode)(struct smu_context *smu); int (*check_fw_status)(struct smu_context *smu); int (*setup_pptable)(struct smu_context *smu); int (*get_vbios_bootup_values)(struct smu_context *smu); @@ -480,16 +492,16 @@ struct smu_funcs int (*check_fw_version)(struct smu_context *smu); int (*powergate_sdma)(struct smu_context *smu, bool gate); int (*powergate_vcn)(struct smu_context *smu, bool gate); + int (*powergate_jpeg)(struct smu_context *smu, bool gate); int (*set_gfx_cgpg)(struct smu_context *smu, bool enable); int (*write_pptable)(struct smu_context *smu); int (*set_min_dcef_deep_sleep)(struct smu_context *smu); int (*set_tool_table_location)(struct smu_context *smu); int (*notify_memory_pool_location)(struct smu_context *smu); - int (*write_watermarks_table)(struct smu_context *smu); int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); - int (*send_smc_msg)(struct smu_context *smu, uint16_t msg); - int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param); + int (*send_smc_msg_with_param)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param); int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg); int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); @@ -499,8 +511,7 @@ struct smu_funcs int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value); int (*init_max_sustainable_clocks)(struct smu_context *smu); int (*start_thermal_control)(struct smu_context *smu); - int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, - void *data, uint32_t *size); + int (*stop_thermal_control)(struct smu_context *smu); int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk); int (*set_active_display_count)(struct smu_context *smu, uint32_t count); int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time, @@ -522,8 +533,6 @@ struct smu_funcs int (*get_current_shallow_sleep_clocks)(struct smu_context *smu, struct smu_clock_info *clocks); int (*notify_smu_enable_pwe)(struct smu_context *smu); - int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, - struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); @@ -537,235 +546,93 @@ struct smu_funcs bool (*baco_is_support)(struct smu_context *smu); enum smu_baco_state (*baco_get_state)(struct smu_context *smu); int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); - int (*baco_reset)(struct 
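/*
 * Sketch (assumption, mirroring the signature change above): with the
 * plain send_smc_msg hook removed from the table, a zero-parameter message
 * can be expressed on top of the remaining callback, plausibly as a small
 * inline helper:
 *
 *     static inline int smu_send_smc_msg(struct smu_context *smu,
 *                                        enum smu_message_type msg)
 *     {
 *             // messages without a payload just pass param 0
 *             return smu_send_smc_msg_with_param(smu, msg, 0);
 *     }
 *
 * The helper name matches the callers visible elsewhere in this diff
 * (e.g. SMU_MSG_PowerUpJpeg in the navi10 changes), and would fit the new
 * smu_internal.h include added to navi10_ppt.c further down; its exact
 * definition is not shown in this hunk.
 */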
smu_context *smu); + int (*baco_enter)(struct smu_context *smu); + int (*baco_exit)(struct smu_context *smu); + int (*mode2_reset)(struct smu_context *smu); int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int (*override_pcie_parameters)(struct smu_context *smu); + uint32_t (*get_pptable_power_limit)(struct smu_context *smu); }; -#define smu_init_microcode(smu) \ - ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0) -#define smu_init_smc_tables(smu) \ - ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0) -#define smu_fini_smc_tables(smu) \ - ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0) -#define smu_init_power(smu) \ - ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0) -#define smu_fini_power(smu) \ - ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0) -#define smu_load_microcode(smu) \ - ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0) -#define smu_check_fw_status(smu) \ - ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0) -#define smu_setup_pptable(smu) \ - ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0) -#define smu_powergate_sdma(smu, gate) \ - ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0) -#define smu_powergate_vcn(smu, gate) \ - ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0) -#define smu_set_gfx_cgpg(smu, enabled) \ - ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0) -#define smu_get_vbios_bootup_values(smu) \ - ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0) -#define smu_get_clk_info_from_vbios(smu) \ - ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0) -#define smu_check_pptable(smu) \ - ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0) -#define smu_parse_pptable(smu) \ - ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0) -#define smu_populate_smc_tables(smu) \ - ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0) -#define smu_check_fw_version(smu) \ - ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0) -#define smu_write_pptable(smu) \ - ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0) -#define smu_set_min_dcef_deep_sleep(smu) \ - ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0) -#define smu_set_tool_table_location(smu) \ - ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0) -#define smu_notify_memory_pool_location(smu) \ - ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0) -#define smu_gfx_off_control(smu, enable) \ - ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0) - -#define smu_write_watermarks_table(smu) \ - ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0) -#define smu_set_last_dcef_min_deep_sleep_clk(smu) \ - ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0) -#define smu_system_features_control(smu, en) \ - ((smu)->funcs->system_features_control ? 
(smu)->funcs->system_features_control((smu), (en)) : 0) -#define smu_init_max_sustainable_clocks(smu) \ - ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) -#define smu_set_default_od_settings(smu, initialize) \ - ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_set_fan_speed_rpm(smu, speed) \ - ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) -#define smu_send_smc_msg(smu, msg) \ - ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0) -#define smu_send_smc_msg_with_param(smu, msg, param) \ - ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) -#define smu_read_smc_arg(smu, arg) \ - ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0) -#define smu_alloc_dpm_context(smu) \ - ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) -#define smu_init_display_count(smu, count) \ - ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0) -#define smu_feature_set_allowed_mask(smu) \ - ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0) -#define smu_feature_get_enabled_mask(smu, mask, num) \ - ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0) -#define smu_is_dpm_running(smu) \ - ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) -#define smu_notify_display_change(smu) \ - ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0) -#define smu_store_powerplay_table(smu) \ - ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0) -#define smu_check_powerplay_table(smu) \ - ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0) -#define smu_append_powerplay_table(smu) \ - ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0) -#define smu_set_default_dpm_table(smu) \ - ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0) -#define smu_populate_umd_state_clk(smu) \ - ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) -#define smu_set_default_od8_settings(smu) \ - ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) -#define smu_get_power_limit(smu, limit, def) \ - ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0) -#define smu_set_power_limit(smu, limit) \ - ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0) -#define smu_get_current_clk_freq(smu, clk_id, value) \ - ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) -#define smu_print_clk_levels(smu, clk_type, buf) \ - ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0) -#define smu_force_clk_levels(smu, clk_type, level) \ - ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0) -#define smu_get_od_percentage(smu, type) \ - ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0) -#define smu_set_od_percentage(smu, type, value) \ - ((smu)->ppt_funcs->set_od_percentage ? 
(smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0) -#define smu_od_edit_dpm_table(smu, type, input, size) \ - ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0) -#define smu_tables_init(smu, tab) \ - ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0) -#define smu_set_thermal_fan_table(smu) \ - ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0) -#define smu_start_thermal_control(smu) \ - ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0) -#define smu_read_sensor(smu, sensor, data, size) \ - ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0) -#define smu_smc_read_sensor(smu, sensor, data, size) \ - ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL) -#define smu_get_power_profile_mode(smu, buf) \ - ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0) -#define smu_set_power_profile_mode(smu, param, param_size) \ - ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0) -#define smu_pre_display_config_changed(smu) \ - ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) -#define smu_display_config_changed(smu) \ - ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) -#define smu_apply_clocks_adjust_rules(smu) \ - ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) -#define smu_notify_smc_dispaly_config(smu) \ - ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) -#define smu_force_dpm_limit_value(smu, highest) \ - ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) -#define smu_unforce_dpm_levels(smu) \ - ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0) -#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \ - ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) -#define smu_set_cpu_power_state(smu) \ - ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0) -#define smu_get_fan_control_mode(smu) \ - ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0) -#define smu_set_fan_control_mode(smu, value) \ - ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0) -#define smu_get_fan_speed_percent(smu, speed) \ - ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) -#define smu_set_fan_speed_percent(smu, speed) \ - ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) -#define smu_get_fan_speed_rpm(smu, speed) \ - ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) - -#define smu_msg_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_clk_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? 
(smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_feature_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_table_get_index(smu, tab) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL) -#define smu_power_get_index(smu, src) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL) -#define smu_workload_get_type(smu, profile) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL) -#define smu_run_afll_btc(smu) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0) -#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) -#define smu_set_deep_sleep_dcefclk(smu, clk) \ - ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0) -#define smu_set_active_display_count(smu, count) \ - ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0) -#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \ - ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0) -#define smu_get_clock_by_type(smu, type, clocks) \ - ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0) -#define smu_get_max_high_clocks(smu, clocks) \ - ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0) -#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0) -#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0) -#define smu_display_clock_voltage_request(smu, clock_req) \ - ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0) -#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \ - ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL) -#define smu_get_dal_power_level(smu, clocks) \ - ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0) -#define smu_get_perf_level(smu, designation, level) \ - ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0) -#define smu_get_current_shallow_sleep_clocks(smu, clocks) \ - ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0) -#define smu_notify_smu_enable_pwe(smu) \ - ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0) -#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \ - ((smu)->funcs->set_watermarks_for_clock_ranges ? 
(smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0) -#define smu_dpm_set_uvd_enable(smu, enable) \ - ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) -#define smu_dpm_set_vce_enable(smu, enable) \ - ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) -#define smu_set_xgmi_pstate(smu, pstate) \ - ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) -#define smu_set_watermarks_table(smu, tab, clock_ranges) \ - ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) -#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ - ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0) -#define smu_thermal_temperature_range_update(smu, range, rw) \ - ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0) -#define smu_get_thermal_temperature_range(smu, range) \ - ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) -#define smu_register_irq_handler(smu) \ - ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) -#define smu_set_azalia_d3_pme(smu) \ - ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) -#define smu_get_dpm_ultimate_freq(smu, param, min, max) \ - ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0) -#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ - ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) -#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ - ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) -#define smu_baco_is_support(smu) \ - ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false) -#define smu_baco_get_state(smu, state) \ - ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) -#define smu_baco_reset(smu) \ - ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) -#define smu_asic_set_performance_level(smu, level) \ - ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); -#define smu_dump_pptable(smu) \ - ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0) -#define smu_get_dpm_uclk_limited(smu, clock, max) \ - ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL) +int smu_load_microcode(struct smu_context *smu); + +int smu_check_fw_status(struct smu_context *smu); + +int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); + +#define smu_i2c_eeprom_init(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL) +#define smu_i2c_eeprom_fini(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_fini ? 
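/*
 * Sketch (editor's illustration): the block of "call-if-present" macros
 * above is being replaced by real exported functions, which makes locking
 * and error handling explicit instead of hiding them in the preprocessor.
 * One plausible shape for such a function, assuming the smu->mutex usage
 * seen elsewhere in this series:
 *
 *     int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 *     {
 *             int ret = 0;
 *
 *             mutex_lock(&smu->mutex);
 *             if (smu->ppt_funcs->set_fan_speed_rpm)
 *                     ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
 *             mutex_unlock(&smu->mutex);
 *
 *             return ret;
 *     }
 *
 * Body and locking are assumptions for illustration; only the prototypes
 * appear in this header diff.
 */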
(smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL) + +int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); + +int smu_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool def, + bool lock_needed); + +int smu_set_power_limit(struct smu_context *smu, uint32_t limit); +int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); +int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type); +int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value); + +int smu_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); + +int smu_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); +int smu_get_power_profile_mode(struct smu_context *smu, char *buf); + +int smu_set_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size, + bool lock_needed); +int smu_get_fan_control_mode(struct smu_context *smu); +int smu_set_fan_control_mode(struct smu_context *smu, int value); +int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed); +int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); +int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed); + +int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk); +int smu_set_active_display_count(struct smu_context *smu, uint32_t count); + +int smu_get_clock_by_type(struct smu_context *smu, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks); + +int smu_get_max_high_clocks(struct smu_context *smu, + struct amd_pp_simple_clock_info *clocks); + +int smu_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks); + +int smu_get_clock_by_type_with_voltage(struct smu_context *smu, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); + +int smu_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request *clock_req); +int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch); +int smu_notify_smu_enable_pwe(struct smu_context *smu); + +int smu_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_set_azalia_d3_pme(struct smu_context *smu); + +bool smu_baco_is_support(struct smu_context *smu); + +int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state); + +int smu_baco_enter(struct smu_context *smu); +int smu_baco_exit(struct smu_context *smu); +int smu_mode2_reset(struct smu_context *smu); extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, @@ -799,6 +666,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table); int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size); int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info); enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu); +int smu_write_watermarks_table(struct smu_context *smu); +int smu_set_watermarks_for_clock_ranges( + struct smu_context *smu, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); /* smu to display interface */ extern int smu_display_configuration_change(struct smu_context *smu, const @@ -809,7 +680,8 @@ extern int smu_get_current_clocks(struct smu_context *smu, extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, 
bool gate); extern int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id); + enum amd_pp_task task_id, + bool lock_needed); int smu_switch_power_profile(struct smu_context *smu, enum PP_SMC_POWER_PROFILE type, bool en); @@ -819,7 +691,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max); + uint32_t *min, uint32_t *max, bool lock_needed); int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -828,10 +700,29 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf); int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask); +int smu_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask, + bool lock_needed); +int smu_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state); +int smu_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state); + +int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +int smu_get_uclk_dpm_states(struct smu_context *smu, + unsigned int *clock_values_in_khz, + unsigned int *num_states); + +int smu_get_dpm_clock_table(struct smu_context *smu, + struct dpm_clocks *clock_table); + +uint32_t smu_get_pptable_power_limit(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h index 78e5927b7711..e3291259b249 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h @@ -95,8 +95,7 @@ //BTC #define PPSMC_MSG_RunAfllBtc 0x30 -#define PPSMC_MSG_RunGfxDcBtc 0x31 -#define PPSMC_MSG_RunSocDcBtc 0x32 +#define PPSMC_MSG_RunDcBtc 0x31 //Debug #define PPSMC_MSG_DramLogSetDramAddrHigh 0x33 diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7bf9a14bfa0b..2ffb666b97e6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -355,6 +355,10 @@ struct pp_hwmgr_func { int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode); int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire); + int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state); + int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); + int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, + bool disable); }; struct pp_table_func { @@ -737,6 +741,7 @@ struct pp_hwmgr { 
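/*
 * Sketch (assumption): the new "lock_needed" parameters added to
 * smu_handle_task(), smu_get_dpm_freq_range() and friends let external
 * callers take smu->mutex, while internal callers that already hold it
 * pass false to avoid a self-deadlock. The expected pattern:
 *
 *     int smu_get_dpm_freq_range(struct smu_context *smu,
 *                                enum smu_clk_type clk_type,
 *                                uint32_t *min, uint32_t *max,
 *                                bool lock_needed)
 *     {
 *             int ret;
 *
 *             if (lock_needed)
 *                     mutex_lock(&smu->mutex);
 *             ret = ...;      // query min/max for clk_type as before
 *             if (lock_needed)
 *                     mutex_unlock(&smu->mutex);
 *
 *             return ret;
 *     }
 *
 * Consistent with this, the navi10 ppt code later in this diff passes
 * "false" from paths assumed to already run under the lock.
 */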
uint32_t smu_version; bool not_vf; bool pm_en; + bool pp_one_vf; struct mutex smu_lock; uint32_t pp_table_version; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h index e02950b505fa..a886f0644d24 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -137,29 +137,29 @@ #define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) #define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT ) #define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT ) -#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT ) +#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT ) #define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT ) #define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT ) #define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) #define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) -#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT ) +#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT ) #define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) #define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) #define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) #define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) #define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) -#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT ) -#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK ) +#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT ) +#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT ) //FIXME need updating // Debug Overrides Bitmask #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002 // I2C Config Bit Defines #define I2C_CONTROLLER_ENABLED 1 @@ -423,18 +423,30 @@ typedef enum { } PwrConfig_e; typedef enum { - XGMI_LINK_RATE_12 = 0, // 12Gbps - XGMI_LINK_RATE_16, // 16Gbps - XGMI_LINK_RATE_22, // 22Gbps - XGMI_LINK_RATE_25, // 25Gbps + XGMI_LINK_RATE_2 = 2, // 2Gbps + XGMI_LINK_RATE_4 = 4, // 4Gbps + XGMI_LINK_RATE_8 = 8, // 8Gbps + XGMI_LINK_RATE_12 = 12, // 12Gbps + XGMI_LINK_RATE_16 = 16, // 16Gbps + XGMI_LINK_RATE_17 = 17, // 17Gbps + XGMI_LINK_RATE_18 = 18, // 18Gbps + XGMI_LINK_RATE_19 = 19, // 19Gbps + XGMI_LINK_RATE_20 = 20, // 20Gbps + XGMI_LINK_RATE_21 = 21, // 21Gbps + XGMI_LINK_RATE_22 = 22, // 22Gbps + XGMI_LINK_RATE_23 = 23, // 23Gbps + XGMI_LINK_RATE_24 = 24, // 24Gbps + XGMI_LINK_RATE_25 = 25, // 25Gbps XGMI_LINK_RATE_COUNT } XGMI_LINK_RATE_e; typedef enum { - XGMI_LINK_WIDTH_2 = 0, // x2 - XGMI_LINK_WIDTH_4, // x4 - XGMI_LINK_WIDTH_8, // x8 - XGMI_LINK_WIDTH_16, // x16 + XGMI_LINK_WIDTH_1 = 1, // x1 + XGMI_LINK_WIDTH_2 = 2, // x2 + XGMI_LINK_WIDTH_4 = 4, // x4 + XGMI_LINK_WIDTH_8 = 8, // x8 + XGMI_LINK_WIDTH_9 = 9, // x9 + XGMI_LINK_WIDTH_16 = 16, // x16 XGMI_LINK_WIDTH_COUNT } XGMI_LINK_WIDTH_e; @@ -696,7 +708,11 @@ typedef struct { uint8_t GpioI2cSda; // Serial Data uint16_t GpioPadding; - uint32_t BoardReserved[9]; + // Platform input telemetry voltage coefficient + uint32_t BoardVoltageCoeffA; // decode by /1000 + uint32_t BoardVoltageCoeffB; // decode by /1000 + + uint32_t BoardReserved[7]; // Padding for MMHUB - 
do not modify this uint32_t MmHubPadding[8]; // SMU internal use @@ -802,7 +818,7 @@ typedef struct { uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units - uint32_t EnabledAvfsModules[2]; + uint32_t EnabledAvfsModules[3]; uint32_t MmHubPadding[8]; // SMU internal use } AvfsFuseOverride_t; @@ -865,7 +881,8 @@ typedef struct { //#define TABLE_ACTIVITY_MONITOR_COEFF 7 #define TABLE_OVERDRIVE 7 #define TABLE_WAFL_XGMI_TOPOLOGY 8 -#define TABLE_COUNT 9 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_COUNT 10 // These defines are used with the SMC_MSG_SetUclkFastSwitch message. typedef enum { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index b0dd05d431dd..d8c9b7f91fcc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -114,6 +114,7 @@ __SMU_DUMMY_MAP(PowerDownJpeg), \ __SMU_DUMMY_MAP(BacoAudioD3PME), \ __SMU_DUMMY_MAP(ArmD3), \ + __SMU_DUMMY_MAP(RunDcBtc), \ __SMU_DUMMY_MAP(RunGfxDcBtc), \ __SMU_DUMMY_MAP(RunSocDcBtc), \ __SMU_DUMMY_MAP(SetMemoryChannelEnable), \ @@ -168,6 +169,7 @@ __SMU_DUMMY_MAP(PowerGateAtHub), \ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ + __SMU_DUMMY_MAP(DFCstateControl), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -251,6 +253,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \ __SMU_DUMMY_MAP(MMHUB_PG), \ __SMU_DUMMY_MAP(ATHUB_PG), \ + __SMU_DUMMY_MAP(APCC_DFLL), \ __SMU_DUMMY_MAP(WAFL_CG), #undef __SMU_DUMMY_MAP diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 5bda8539447a..786de7741990 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -27,7 +27,7 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_VG20 0x13 -#define SMU11_DRIVER_IF_VERSION_ARCT 0x09 +#define SMU11_DRIVER_IF_VERSION_ARCT 0x10 #define SMU11_DRIVER_IF_VERSION_NV10 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x34 @@ -48,6 +48,8 @@ #define SMU11_TOOL_SIZE 0x19000 +#define MAX_PCIE_CONF 2 + #define CLK_MAP(clk, index) \ [SMU_##clk] = {1, (index)} @@ -88,6 +90,11 @@ struct smu_11_0_dpm_table { uint32_t max; /* MHz */ }; +struct smu_11_0_pcie_table { + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; +}; + struct smu_11_0_dpm_tables { struct smu_11_0_dpm_table soc_table; struct smu_11_0_dpm_table gfx_table; @@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables { struct smu_11_0_dpm_table display_table; struct smu_11_0_dpm_table phy_table; struct smu_11_0_dpm_table fclk_table; + struct smu_11_0_pcie_table pcie_table; }; struct smu_11_0_dpm_context { @@ -130,6 +138,128 @@ enum smu_v11_0_baco_seq { BACO_SEQ_COUNT, }; -void smu_v11_0_set_smu_funcs(struct smu_context *smu); +int smu_v11_0_init_microcode(struct smu_context *smu); + +int smu_v11_0_load_microcode(struct smu_context *smu); + +int smu_v11_0_init_smc_tables(struct smu_context *smu); + +int smu_v11_0_fini_smc_tables(struct smu_context *smu); + +int smu_v11_0_init_power(struct smu_context *smu); + +int smu_v11_0_fini_power(struct smu_context *smu); + +int smu_v11_0_check_fw_status(struct smu_context *smu); + +int smu_v11_0_setup_pptable(struct smu_context *smu); + +int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu); + +int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu); + +int smu_v11_0_check_pptable(struct smu_context *smu); + +int smu_v11_0_parse_pptable(struct 
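/*
 * Sketch (editor's illustration): instead of a shared smu_funcs vtable
 * installed by the removed smu_v11_0_set_smu_funcs(), each ASIC now builds
 * its own pptable_funcs table directly from these exported smu_v11_0_*
 * helpers, overriding only what differs. Assumed shape:
 *
 *     static const struct pptable_funcs navi10_ppt_funcs = {
 *             .init_microcode          = smu_v11_0_init_microcode,
 *             .check_fw_status         = smu_v11_0_check_fw_status,
 *             .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
 *             .read_smc_arg            = smu_v11_0_read_arg,
 *             .baco_enter              = smu_v11_0_baco_enter,
 *             .baco_exit               = smu_v11_0_baco_exit,
 *             // ...ASIC-specific entries such as print_clk_levels follow
 *     };
 *
 * The table name and exact membership are assumptions; only the helper
 * prototypes themselves are introduced in this header.
 */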
smu_context *smu); + +int smu_v11_0_populate_smc_pptable(struct smu_context *smu); + +int smu_v11_0_check_fw_version(struct smu_context *smu); + +int smu_v11_0_write_pptable(struct smu_context *smu); + +int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu); + +int smu_v11_0_set_tool_table_location(struct smu_context *smu); + +int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); + +int smu_v11_0_system_features_control(struct smu_context *smu, + bool en); + +int +smu_v11_0_send_msg_with_param(struct smu_context *smu, + enum smu_message_type msg, + uint32_t param); + +int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); + +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); + +int smu_v11_0_set_allowed_mask(struct smu_context *smu); + +int smu_v11_0_get_enabled_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num); + +int smu_v11_0_notify_display_change(struct smu_context *smu); + +int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n); + +int smu_v11_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value); + +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); + +int smu_v11_0_start_thermal_control(struct smu_context *smu); + +int smu_v11_0_stop_thermal_control(struct smu_context *smu); + +int smu_v11_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); + +int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); + +int +smu_v11_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req); + +uint32_t +smu_v11_0_get_fan_control_mode(struct smu_context *smu); + +int +smu_v11_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode); + +int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed); + +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v11_0_register_irq_handler(struct smu_context *smu); + +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); + +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +bool smu_v11_0_baco_is_support(struct smu_context *smu); + +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); + +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); + +int smu_v11_0_baco_enter(struct smu_context *smu); +int smu_v11_0_baco_exit(struct smu_context *smu); + +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_v11_0_override_pcie_parameters(struct smu_context *smu); + +int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size); + +uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h index 86cdc3393eac..b2f96a101124 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h @@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table struct 
smu_11_0_power_saving_clock_table power_saving_clock; struct smu_11_0_overdrive_table overdrive_table; +#ifndef SMU_11_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h +#endif } __attribute__((packed)); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index acf3db12f59f..3f1cd06e273c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -37,6 +37,57 @@ struct smu_12_0_cmn2aisc_mapping { int map_to; }; -void smu_v12_0_set_smu_funcs(struct smu_context *smu); +int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, + uint16_t msg); + +int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); + +int smu_v12_0_wait_for_response(struct smu_context *smu); + +int +smu_v12_0_send_msg_with_param(struct smu_context *smu, + enum smu_message_type msg, + uint32_t param); + +int smu_v12_0_check_fw_status(struct smu_context *smu); + +int smu_v12_0_check_fw_version(struct smu_context *smu); + +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate); + +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate); + +int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate); + +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable); + +int smu_v12_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); + +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu); + +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v12_0_init_smc_tables(struct smu_context *smu); + +int smu_v12_0_fini_smc_tables(struct smu_context *smu); + +int smu_v12_0_populate_smc_tables(struct smu_context *smu); + +int smu_v12_0_get_enabled_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num); + +int smu_v12_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value); + +int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v12_0_mode2_reset(struct smu_context *smu); + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h index a0883038f3c3..0c66f0fe1aaf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h @@ -120,7 +120,8 @@ #define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D #define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F #define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60 -#define PPSMC_Message_Count 0x61 +#define PPSMC_MSG_DFCstateControl 0x63 +#define PPSMC_Message_Count 0x64 typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 0b461404af6b..15403b7979d6 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -26,6 +26,7 @@ #include <linux/pci.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -35,6 +36,7 @@ #include "navi10_ppt.h" #include "smu_v11_0_pptable.h" #include "smu_v11_0_ppsmc.h" +#include "nbio/nbio_7_4_sh_mask.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" @@ -177,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping 
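/*
 * Sketch (assumption, following the existing cmn2aisc convention): these
 * mapping tables translate the common SMU_MSG_* / SMU_FEATURE_* enums into
 * per-ASIC indices, so a message introduced above such as RunDcBtc only
 * needs a one-line mapping in the ASIC's message map, e.g.:
 *
 *     static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[] = {
 *             MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
 *             // DFCstateControl would get a similar entry once the ASIC
 *             // defines its PPSMC message ID
 *     };
 *
 * Lookup then goes through the get_smu_msg_index() callback, which is
 * expected to return -EINVAL for messages an ASIC does not implement.
 * Table name and entries here are illustrative, not quoted from the diff.
 */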
navi10_feature_mask_map[SMU_FEATURE_COUN FEA_MAP(TEMP_DEPENDENT_VMIN), FEA_MAP(MMHUB_PG), FEA_MAP(ATHUB_PG), + FEA_MAP(APCC_DFLL), }; static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = { @@ -205,7 +208,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; @@ -327,40 +330,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, memset(feature_mask, 0, sizeof(uint32_t) * num); *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) - | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) - | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) - | FEATURE_MASK(FEATURE_DPM_LINK_BIT) - | FEATURE_MASK(FEATURE_GFX_ULV_BIT) | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT) | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT) | FEATURE_MASK(FEATURE_PPT_BIT) | FEATURE_MASK(FEATURE_TDC_BIT) | FEATURE_MASK(FEATURE_GFX_EDC_BIT) + | FEATURE_MASK(FEATURE_APCC_PLUS_BIT) | FEATURE_MASK(FEATURE_VR0HOT_BIT) | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT) | FEATURE_MASK(FEATURE_THERMAL_BIT) | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT) - | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) - | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT) + | FEATURE_MASK(FEATURE_DS_LCLK_BIT) | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) | FEATURE_MASK(FEATURE_FW_DSTATE_BIT) | FEATURE_MASK(FEATURE_BACO_BIT) | FEATURE_MASK(FEATURE_ACDC_BIT) | FEATURE_MASK(FEATURE_GFX_SS_BIT) | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) - | FEATURE_MASK(FEATURE_FW_CTF_BIT); + | FEATURE_MASK(FEATURE_FW_CTF_BIT) + | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); + + if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + + if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); + + if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT); if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT) | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT); - if (adev->pm.pp_feature & PP_GFXOFF_MASK) { + if (adev->pm.pp_feature & PP_ULV_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (adev->pm.pp_feature & PP_GFXOFF_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); - /* TODO: remove it once fw fix the bug */ - *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT); - } if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT); @@ -369,8 +384,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT); if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT) - | FEATURE_MASK(FEATURE_JPEG_PG_BIT); + 
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT); + + if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT); /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */ if (is_asic_secure(smu)) { @@ -585,6 +602,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; PPTable_t *driver_ppt = NULL; + int i; driver_ppt = table_context->driver_pptable; @@ -615,6 +633,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0]; dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1]; + for (i = 0; i < MAX_PCIE_CONF; i++) { + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i]; + } + return 0; } @@ -644,6 +667,31 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) return ret; } +static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg); + if (ret) + return ret; + } + power_gate->jpeg_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg); + if (ret) + return ret; + } + power_gate->jpeg_gated = true; + } + + return ret; +} + static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) @@ -677,13 +725,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu return dpm_desc->SnapToDiscrete == 0 ? 
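/*
 * Note (editor's sketch): the allowed-feature mask above is now assembled
 * incrementally from module-parameter flags instead of being hardcoded, so
 * each DPM feature can be disabled via amdgpu's ppfeaturemask. The
 * pattern, repeated per feature:
 *
 *     if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
 *             *(uint64_t *)feature_mask |=
 *                     FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
 *
 * The uint64_t cast assumes the caller supplies at least two uint32_t
 * words, which the memset at the top of the function zeroes first.
 */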
true : false; } +static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature) +{ + return od_table->cap[feature]; +} + + static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { + uint16_t *curve_settings; int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; + struct smu_table_context *table_context = &smu->smu_table; + uint32_t gen_speed, lane_width; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct amdgpu_device *adev = smu->adev; + PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable; + OverDriveTable_t *od_table = + (OverDriveTable_t *)table_context->overdrive_table; + struct smu_11_0_overdrive_table *od_settings = smu->od_settings; switch (clk_type) { case SMU_GFXCLK: @@ -734,6 +798,69 @@ static int navi10_print_clk_levels(struct smu_context *smu, } break; + case SMU_PCIE: + gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; + lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + for (i = 0; i < NUM_LINK_LEVELS; i++) + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," : + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," : + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," : + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "", + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "", + pptable->LclkFreq[i], + (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) && + (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ? 
+ "*" : ""); + break; + case SMU_OD_SCLK: + if (!smu->od_enabled || !od_table || !od_settings) + break; + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) + break; + size += sprintf(buf + size, "OD_SCLK:\n"); + size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax); + break; + case SMU_OD_MCLK: + if (!smu->od_enabled || !od_table || !od_settings) + break; + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) + break; + size += sprintf(buf + size, "OD_MCLK:\n"); + size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax); + break; + case SMU_OD_VDDC_CURVE: + if (!smu->od_enabled || !od_table || !od_settings) + break; + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) + break; + size += sprintf(buf + size, "OD_VDDC_CURVE:\n"); + for (i = 0; i < 3; i++) { + switch (i) { + case 0: + curve_settings = &od_table->GfxclkFreq1; + break; + case 1: + curve_settings = &od_table->GfxclkFreq2; + break; + case 2: + curve_settings = &od_table->GfxclkFreq3; + break; + default: + break; + } + size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE); + } + break; default: break; } @@ -759,6 +886,12 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_UCLK: case SMU_DCEFCLK: case SMU_FCLK: + /* There is only 2 levels for fine grained DPM */ + if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 1 : 0); + } + ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) return size; @@ -783,13 +916,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) int ret = 0; uint32_t min_sclk_freq = 0, min_mclk_freq = 0; - ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL); + ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false); if (ret) return ret; smu->pstate_sclk = min_sclk_freq * 100; - ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL); + ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false); if (ret) return ret; @@ -842,7 +975,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { - ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq); + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false); if (ret) return ret; ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq); @@ -892,7 +1025,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) for (i = 0; i < ARRAY_SIZE(clks); i++) { clk_type = clks[i]; - ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); if (ret) return ret; @@ -919,7 +1052,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) for (i = 0; i < ARRAY_SIZE(clks); i++) { clk_type = clks[i]; - ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); if (ret) return ret; @@ -1254,7 +1387,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; - if 
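/*
 * Note (editor's sketch): the OD_VDDC_CURVE printing above walks the three
 * curve points by taking a uint16_t pointer at GfxclkFreq1/2/3 and reading
 * curve_settings[0] (frequency, MHz) and curve_settings[1] (voltage,
 * scaled down by NAVI10_VOLTAGE_SCALE). This relies on each GfxclkFreqN
 * being immediately followed by its matching GfxclkVoltN in
 * OverDriveTable_t, i.e. a layout equivalent to:
 *
 *     uint16_t GfxclkFreq1;   // MHz
 *     uint16_t GfxclkVolt1;   // paired voltage
 *     uint16_t GfxclkFreq2;
 *     uint16_t GfxclkVolt2;
 *     ...
 *
 * If that adjacency ever changed in the driver interface header, the
 * indexing here would silently read the wrong field.
 */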
@@ -1408,7 +1543,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 		*size = 4;
 		break;
 	default:
-		ret = smu_smc_read_sensor(smu, sensor, data, size);
+		ret = smu_v11_0_read_sensor(smu, sensor, data, size);
 	}
 	mutex_unlock(&smu->sensor_lock);
 
@@ -1451,18 +1586,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
 	uint32_t sclk_freq = 0, uclk_freq = 0;
 	uint32_t uclk_level = 0;
 
-	switch (adev->pdev->revision) {
-	case 0xf0: /* XTX */
-	case 0xc0:
-		sclk_freq = NAVI10_PEAK_SCLK_XTX;
-		break;
-	case 0xf1: /* XT */
-	case 0xc1:
-		sclk_freq = NAVI10_PEAK_SCLK_XT;
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+		switch (adev->pdev->revision) {
+		case 0xf0: /* XTX */
+		case 0xc0:
+			sclk_freq = NAVI10_PEAK_SCLK_XTX;
+			break;
+		case 0xf1: /* XT */
+		case 0xc1:
+			sclk_freq = NAVI10_PEAK_SCLK_XT;
+			break;
+		default: /* XL */
+			sclk_freq = NAVI10_PEAK_SCLK_XL;
+			break;
+		}
 		break;
-	default: /* XL */
-		sclk_freq = NAVI10_PEAK_SCLK_XL;
+	case CHIP_NAVI14:
+		switch (adev->pdev->revision) {
+		case 0xc7: /* XT */
+		case 0xf4:
+			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+			break;
+		case 0xc1: /* XTM */
+		case 0xf2:
+			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+			break;
+		case 0xc3: /* XLM */
+		case 0xf3:
+			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+			break;
+		case 0xc5: /* XTX */
+		case 0xf6:
+			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK;
+			break;
+		default: /* XL */
+			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+			break;
+		}
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
@@ -1485,10 +1649,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
 static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
 	int ret = 0;
-	struct amdgpu_device *adev = smu->adev;
-
-	if (adev->asic_type != CHIP_NAVI10)
-		return -EINVAL;
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1541,17 +1701,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
 	return ret;
 }
 
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+	PPTable_t *pptable = smu->smu_table.driver_pptable;
+	return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
 static int navi10_get_power_limit(struct smu_context *smu,
 				     uint32_t *limit,
-				     bool asic_default)
+				     bool cap)
 {
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	uint32_t asic_default_power_limit = 0;
 	int ret = 0;
 	int power_src;
 
-	if (!smu->default_power_limit ||
-	    !smu->power_limit) {
+	if (!smu->power_limit) {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
 			power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
 			if (power_src < 0)
@@ -1574,23 +1739,291 @@ static int navi10_get_power_limit(struct smu_context *smu,
 				pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
 		}
 
-		if (smu->od_enabled) {
-			asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
-			asic_default_power_limit /= 100;
-		}
-
-		smu->default_power_limit = asic_default_power_limit;
 		smu->power_limit = asic_default_power_limit;
 	}
 
-	if (asic_default)
-		*limit = smu->default_power_limit;
+	if (cap)
+		*limit = smu_v11_0_get_max_power_limit(smu);
 	else
 		*limit = smu->power_limit;
 
 	return 0;
 }
+
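navi10_update_pcie_parameters, added below, walks the link levels and sends one SMU_MSG_OverridePcieParameters per level, packing the level index, the capped PCIe generation, and the capped lane-count code into a single dword. A small sketch of that encoding under the same assumed layout; the helper name is hypothetical:

#include <stdint.h>

/* assumed arg layout: [31:16] link level, [15:8] PCIe gen, [7:0] lane-count code */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t width,
			      uint32_t gen_cap, uint32_t width_cap)
{
	if (gen > gen_cap)
		gen = gen_cap;		/* never advertise more than the platform allows */
	if (width > width_cap)
		width = width_cap;
	return (level << 16) | (gen << 8) | width;
}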
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+				     uint32_t pcie_gen_cap,
+				     uint32_t pcie_width_cap)
+{
+	PPTable_t *pptable = smu->smu_table.driver_pptable;
+	int ret, i;
+	uint32_t smu_pcie_arg;
+
+	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
+	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+		smu_pcie_arg = (i << 16) |
+			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+					pptable->PcieLaneCount[i] : pcie_width_cap);
+		ret = smu_send_smc_msg_with_param(smu,
+					  SMU_MSG_OverridePcieParameters,
+					  smu_pcie_arg);
+
+		if (ret)
+			return ret;
+
+		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+		if (pptable->PcieLaneCount[i] > pcie_width_cap)
+			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+	}
+
+	return 0;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
+	pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+	pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+	pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+	pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+	pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+	pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+	if (value < od_table->min[setting]) {
+		pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+		return -EINVAL;
+	}
+	if (value > od_table->max[setting]) {
+		pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu) {
+	struct smu_11_0_overdrive_table *overdrive_table = NULL;
+	struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+	if (!smu->smu_table.power_play_table) {
+		pr_err("powerplay table uninitialized!\n");
+		return -ENOENT;
+	}
+	powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+	overdrive_table = &powerplay_table->overdrive_table;
+	if (!smu->od_settings) {
+		smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+	} else {
+		memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+	}
+	return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
+	OverDriveTable_t *od_table;
+	int ret = 0;
+
+	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+	if (ret)
+		return ret;
+
+	if (initialize) {
+		ret = navi10_setup_od_limits(smu);
+		if (ret) {
+			pr_err("Failed to retrieve board OD limits\n");
+			return ret;
+		}
+
+	}
+
+	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+	if (od_table) {
+		navi10_dump_od_table(od_table);
+	}
+
+	return ret;
+}
+
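navi10_od_edit_dpm_table, which follows, consumes the values written to the pp_od_clk_voltage sysfs file as flat (index, value) pairs and rejects anything outside the board's overdrive limits. A compact sketch of that validation loop, with hypothetical stand-ins for the SMU limit tables:

#include <stdint.h>

struct od_range { uint32_t min, max; };	/* stand-in for the smu->od_settings limits */

/* validate (index, value) pairs, e.g. "0 1000 1 1850" -> {0,1000},{1,1850} */
static int check_od_pairs(const struct od_range range[2], const long *input,
			  uint32_t size)
{
	for (uint32_t i = 0; i + 1 < size; i += 2) {
		long idx = input[i];
		long value = input[i + 1];

		if (idx < 0 || idx > 1)
			return -1;	/* only min (0) and max (1) are editable */
		if (value < range[idx].min || value > range[idx].max)
			return -1;	/* outside the board's allowed window */
	}
	return 0;
}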
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+	int i;
+	int ret = 0;
+	struct smu_table_context *table_context = &smu->smu_table;
+	OverDriveTable_t *od_table;
+	struct smu_11_0_overdrive_table *od_settings;
+	enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+	uint16_t *freq_ptr, *voltage_ptr;
+	od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+	if (!smu->od_enabled) {
+		pr_warn("OverDrive is not enabled!\n");
+		return -EINVAL;
+	}
+
+	if (!smu->od_settings) {
+		pr_err("OD board limits are not set!\n");
+		return -ENOENT;
+	}
+
+	od_settings = smu->od_settings;
+
+	switch (type) {
+	case PP_OD_EDIT_SCLK_VDDC_TABLE:
+		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+			pr_warn("GFXCLK_LIMITS not supported!\n");
+			return -ENOTSUPP;
+		}
+		if (!table_context->overdrive_table) {
+			pr_err("Overdrive is not initialized\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < size; i += 2) {
+			if (i + 2 > size) {
+				pr_info("invalid number of input parameters %d\n", size);
+				return -EINVAL;
+			}
+			switch (input[i]) {
+			case 0:
+				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+				freq_ptr = &od_table->GfxclkFmin;
+				if (input[i + 1] > od_table->GfxclkFmax) {
+					pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
						input[i + 1],
						od_table->GfxclkFmax);
+					return -EINVAL;
+				}
+				break;
+			case 1:
+				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+				freq_ptr = &od_table->GfxclkFmax;
+				if (input[i + 1] < od_table->GfxclkFmin) {
+					pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
						input[i + 1],
						od_table->GfxclkFmin);
+					return -EINVAL;
+				}
+				break;
+			default:
+				pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+				pr_info("Supported indices: [0:min,1:max]\n");
+				return -EINVAL;
+			}
+			ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+			if (ret)
+				return ret;
+			*freq_ptr = input[i + 1];
+		}
+		break;
+	case PP_OD_EDIT_MCLK_VDDC_TABLE:
+		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+			pr_warn("UCLK_MAX not supported!\n");
+			return -ENOTSUPP;
+		}
+		if (size < 2) {
+			pr_info("invalid number of parameters: %d\n", size);
+			return -EINVAL;
+		}
+		if (input[0] != 1) {
+			pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+			pr_info("Supported indices: [1:max]\n");
+			return -EINVAL;
+		}
+		ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+		if (ret)
+			return ret;
+		od_table->UclkFmax = input[1];
+		break;
+	case PP_OD_COMMIT_DPM_TABLE:
+		navi10_dump_od_table(od_table);
+		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+		if (ret) {
+			pr_err("Failed to import overdrive table!\n");
+			return ret;
+		}
+		// no lock needed because smu_od_edit_dpm_table has it
+		ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+			AMD_PP_TASK_READJUST_POWER_STATE,
+			false);
+		if (ret) {
+			return ret;
+		}
+		break;
+	case PP_OD_EDIT_VDDC_CURVE:
+		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+			pr_warn("GFXCLK_CURVE not supported!\n");
+			return -ENOTSUPP;
+		}
+		if (size < 3) {
+			pr_info("invalid number of parameters: %d\n", size);
+			return -EINVAL;
+		}
+		if (!od_table) {
+			pr_info("Overdrive is not initialized\n");
+			return -EINVAL;
+		}
+
+		switch (input[0]) {
+		case 0:
+			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+			freq_ptr = &od_table->GfxclkFreq1;
+			voltage_ptr = &od_table->GfxclkVolt1;
+			break;
+		case 1:
+			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+			freq_ptr = &od_table->GfxclkFreq2;
+			voltage_ptr = &od_table->GfxclkVolt2;
+			break;
+		case 2:
+			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+			freq_ptr = &od_table->GfxclkFreq3;
+			voltage_ptr = &od_table->GfxclkVolt3;
+			break;
+		default:
+			pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+			pr_info("Supported indices: [0, 1, 2]\n");
+			return -EINVAL;
+		}
+		ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+		if (ret)
+			return ret;
+		// Allow setting zero to disable the OverDrive VDDC curve
+		if (input[2] != 0) {
+			ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+			if (ret)
+				return ret;
+			*freq_ptr = input[1];
+			*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+			pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+		} else {
+			// If setting 0, disable all voltage curve settings
+			od_table->GfxclkVolt1 = 0;
+			od_table->GfxclkVolt2 = 0;
+			od_table->GfxclkVolt3 = 0;
+		}
+		navi10_dump_od_table(od_table);
+		break;
+	default:
+		return -ENOSYS;
+	}
+	return ret;
+}
+
+static int navi10_run_btc(struct smu_context *smu)
+{
+	int ret = 0;
+
+	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+	if (ret)
+		pr_err("RunBtc failed!\n");
+
+	return ret;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
 	.tables_init = navi10_tables_init,
 	.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1606,6 +2039,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
 	.set_default_dpm_table = navi10_set_default_dpm_table,
 	.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
+	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
 	.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
 	.print_clk_levels = navi10_print_clk_levels,
 	.force_clk_levels = navi10_force_clk_levels,
@@ -1629,12 +2063,63 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
 	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
 	.get_power_limit = navi10_get_power_limit,
+	.update_pcie_parameters = navi10_update_pcie_parameters,
+	.init_microcode = smu_v11_0_init_microcode,
+	.load_microcode = smu_v11_0_load_microcode,
+	.init_smc_tables = smu_v11_0_init_smc_tables,
+	.fini_smc_tables = smu_v11_0_fini_smc_tables,
+	.init_power = smu_v11_0_init_power,
+	.fini_power = smu_v11_0_fini_power,
+	.check_fw_status = smu_v11_0_check_fw_status,
+	.setup_pptable = smu_v11_0_setup_pptable,
+	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+	.check_pptable = smu_v11_0_check_pptable,
+	.parse_pptable = smu_v11_0_parse_pptable,
+	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
+	.check_fw_version = smu_v11_0_check_fw_version,
+	.write_pptable = smu_v11_0_write_pptable,
+	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+	.set_tool_table_location = smu_v11_0_set_tool_table_location,
+	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+	.system_features_control = smu_v11_0_system_features_control,
+	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+	.read_smc_arg = smu_v11_0_read_arg,
+	.init_display_count = smu_v11_0_init_display_count,
+	.set_allowed_mask = smu_v11_0_set_allowed_mask,
+	.get_enabled_mask = smu_v11_0_get_enabled_mask,
+	.notify_display_change = smu_v11_0_notify_display_change,
+	.set_power_limit = smu_v11_0_set_power_limit,
+	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+	.start_thermal_control = smu_v11_0_start_thermal_control,
+	.stop_thermal_control = smu_v11_0_stop_thermal_control,
+	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+	.gfx_off_control = smu_v11_0_gfx_off_control,
+	.register_irq_handler = smu_v11_0_register_irq_handler,
+	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+	.baco_is_support = smu_v11_0_baco_is_support,
+	.baco_get_state = smu_v11_0_baco_get_state,
+	.baco_set_state = smu_v11_0_baco_set_state,
+	.baco_enter = smu_v11_0_baco_enter,
+	.baco_exit = smu_v11_0_baco_exit,
+	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+	.set_default_od_settings = navi10_set_default_od_settings,
+	.od_edit_dpm_table = navi10_od_edit_dpm_table,
+	.get_pptable_power_limit = navi10_get_pptable_power_limit,
+	.run_btc = navi10_run_btc,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
 {
-	struct smu_table_context *smu_table = &smu->smu_table;
-
 	smu->ppt_funcs = &navi10_ppt_funcs;
-	smu_table->table_count = TABLE_COUNT;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..ec03c7992f6d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,17 @@
 #define NAVI10_PEAK_SCLK_XT		(1755)
 #define NAVI10_PEAK_SCLK_XL		(1625)
 
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK	(1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK	(1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK	(1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK	(1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK	(1448)
+
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL			0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
+
 extern void navi10_set_ppt_funcs(struct smu_context *smu);
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e62bfba51562..89a54f8e08d3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -23,6 +23,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
+#include "smu_internal.h"
 #include "soc15_common.h"
 #include "smu_v12_0_ppsmc.h"
 #include "smu12_driver_if.h"
@@ -30,6 +31,9 @@
 
 #include "renoir_ppt.h"
 
+#define CLK_MAP(clk, index) \
+	[SMU_##clk] = {1, (index)}
+
 #define MSG_MAP(msg, index) \
 	[SMU_MSG_##msg] = {1, (index)}
 
@@ -103,6 +107,14 @@ static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq),
 };
 
+static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
+	CLK_MAP(GFXCLK, CLOCK_GFXCLK),
+	CLK_MAP(SCLK,   CLOCK_GFXCLK),
+	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
+	CLK_MAP(UCLK, CLOCK_UMCCLK),
+	CLK_MAP(MCLK, CLOCK_UMCCLK),
+};
+
 static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
 	TAB_MAP_VALID(WATERMARKS),
 	TAB_MAP_INVALID(CUSTOM_DPM),
@@ -124,6 +136,21 @@ static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
 	return mapping.map_to;
 }
 
+static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
+{
+	struct smu_12_0_cmn2aisc_mapping mapping;
+
+	if (index >= SMU_CLK_COUNT)
+		return -EINVAL;
+
+	mapping = renoir_clk_map[index];
+	if (!(mapping.valid_mapping)) {
+		return -EINVAL;
+	}
+
+	return mapping.map_to;
+}
+
 static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
 {
 	struct smu_12_0_cmn2aisc_mapping mapping;
@@ -138,6 +165,27 @@ static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
 	return mapping.map_to;
 }
 
+static int renoir_get_metrics_table(struct smu_context *smu,
+				    SmuMetrics_t *metrics_table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	int ret = 0;
+
+	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
+		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+				(void *)smu_table->metrics_table, false);
+		if (ret) {
+			pr_info("Failed to export SMU metrics table!\n");
+			return ret;
+		}
+		smu_table->metrics_time = jiffies;
+	}
+
+	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	return ret;
+}
+
 static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
@@ -153,6 +201,11 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 	if (!smu_table->clocks_table)
 		return -ENOMEM;
 
+	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+	if (!smu_table->metrics_table)
+		return -ENOMEM;
+	smu_table->metrics_time = 0;
+
 	return 0;
 }
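renoir_get_metrics_table above refreshes its copy of the metrics table from the SMU only when the cached data is older than 100 ms, so bursts of sensor reads share a single table export. A stand-alone sketch of the same read-through cache, using a millisecond timestamp in place of jiffies and a hypothetical reader callback in place of smu_update_table():

#include <stdint.h>
#include <string.h>

struct metrics { uint32_t gfx_activity, gfx_temperature; };	/* stand-in for SmuMetrics_t */

struct metrics_cache {
	struct metrics data;
	uint64_t stamp_ms;	/* 0 means "never filled" */
};

/* refresh at most every 100 ms; reader() stands in for the SMU table export */
static int get_metrics(struct metrics_cache *c, uint64_t now_ms,
		       int (*reader)(struct metrics *), struct metrics *out)
{
	if (!c->stamp_ms || now_ms - c->stamp_ms > 100) {
		int ret = reader(&c->data);	/* expensive bus transaction */
		if (ret)
			return ret;
		c->stamp_ms = now_ms;
	}
	memcpy(out, &c->data, sizeof(*out));	/* cheap cached copy */
	return 0;
}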
@@ -160,21 +213,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
 
 /*
  * This interface is just for getting the ultimate uclk frequency and shouldn't
  * introduce other similar functions, which would result in too many callbacks.
  */
-static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+						uint32_t dpm_level, uint32_t *freq)
 {
+	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
-	DpmClocks_t *table = smu->smu_table.clocks_table;
-
-	if (!clock || !table)
+	if (!clk_table || clk_type >= SMU_CLK_COUNT)
 		return -EINVAL;
 
-	if (max)
-		*clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
-	else
-		*clock = table->FClocks[0].Freq;
+	GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
 
 	return 0;
-
 }
 
 static int renoir_print_clk_levels(struct smu_context *smu,
@@ -183,11 +232,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	int i, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
-	SmuMetrics_t metrics = {0};
+	SmuMetrics_t metrics;
 
 	if (!clk_table || clk_type >= SMU_CLK_COUNT)
 		return -EINVAL;
 
+	memset(&metrics, 0, sizeof(metrics));
+
 	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
 			       (void *)&metrics, false);
 	if (ret)
@@ -198,7 +249,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	case SMU_SCLK:
 		/* the parameters returned by the retrieved table are in MHz */
 		cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
-		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
+		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
 		if (!ret) {
 			/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
 			if (cur_value == max)
@@ -246,20 +297,596 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 	return size;
 }
 
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+	enum amd_pm_state_type pm_type;
+	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+	if (!smu_dpm_ctx->dpm_context ||
+	    !smu_dpm_ctx->dpm_current_power_state)
+		return -EINVAL;
+
+	switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+	case SMU_STATE_UI_LABEL_BATTERY:
+		pm_type = POWER_STATE_TYPE_BATTERY;
+		break;
+	case SMU_STATE_UI_LABEL_BALLANCED:
+		pm_type = POWER_STATE_TYPE_BALANCED;
+		break;
+	case SMU_STATE_UI_LABEL_PERFORMANCE:
+		pm_type = POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	default:
+		if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+		else
+			pm_type = POWER_STATE_TYPE_DEFAULT;
+		break;
+	}
+
+	return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
+	int ret = 0;
+
+	if (enable) {
+		/* vcn dpm on is a prerequisite for vcn power gate messages */
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+			if (ret)
+				return ret;
+		}
+		power_gate->vcn_gated = false;
+	} else {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+			if (ret)
+				return ret;
+		}
+		power_gate->vcn_gated = true;
+	}
+
+	return ret;
+}
+
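renoir_dpm_set_uvd_enable above and renoir_dpm_set_jpeg_enable below follow one power-gate pattern: send the PowerUp/PowerDown message only when the corresponding PG feature bit is enabled, then record the new state in a driver-side flag. A distilled sketch of that pattern (all names illustrative):

#include <stdbool.h>

struct pg_block {
	bool feature_enabled;	/* PG feature bit reported by the SMU */
	bool gated;		/* driver-side bookkeeping */
	int (*power_up)(void);	/* stand-in for the PowerUp message */
	int (*power_down)(void);	/* stand-in for the PowerDown message */
};

static int pg_set_enable(struct pg_block *blk, bool enable)
{
	int ret = 0;

	if (blk->feature_enabled)	/* skip the message when PG is fused off */
		ret = enable ? blk->power_up() : blk->power_down();
	if (!ret)
		blk->gated = !enable;	/* an enabled IP block is "not gated" */
	return ret;
}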
+static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
+	int ret = 0;
+
+	if (enable) {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+			if (ret)
+				return ret;
+		}
+		power_gate->jpeg_gated = false;
+	} else {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+			if (ret)
+				return ret;
+		}
+		power_gate->jpeg_gated = true;
+	}
+
+	return ret;
+}
+
+static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
+				       enum smu_clk_type clk_type,
+				       uint32_t *value)
+{
+	int ret = 0, clk_id = 0;
+	SmuMetrics_t metrics;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	clk_id = smu_clk_get_index(smu, clk_type);
+	if (clk_id < 0)
+		return clk_id;
+
+	*value = metrics.ClockFrequency[clk_id];
+
+	return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+	int ret = 0, i = 0;
+	uint32_t min_freq, max_freq, force_freq;
+	enum smu_clk_type clk_type;
+
+	enum smu_clk_type clks[] = {
+		SMU_GFXCLK,
+		SMU_MCLK,
+		SMU_SOCCLK,
+	};
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++) {
+		clk_type = clks[i];
+		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+		if (ret)
+			return ret;
+
+		force_freq = highest ? max_freq : min_freq;
+		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu) {
+
+	int ret = 0, i = 0;
+	uint32_t min_freq, max_freq;
+	enum smu_clk_type clk_type;
+
+	struct clk_feature_map {
+		enum smu_clk_type clk_type;
+		uint32_t	feature;
+	} clk_feature_map[] = {
+		{SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+		{SMU_MCLK,   SMU_FEATURE_DPM_UCLK_BIT},
+		{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+	};
+
+	for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+		if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+			continue;
+
+		clk_type = clk_feature_map[i].clk_type;
+
+		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+		if (ret)
+			return ret;
+
+		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value)
+{
+	int ret = 0;
+	SmuMetrics_t metrics;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	*value = (metrics.GfxTemperature / 100) *
+		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return 0;
+}
+
+static int renoir_get_current_activity_percent(struct smu_context *smu,
+					       enum amd_pp_sensors sensor,
+					       uint32_t *value)
+{
+	int ret = 0;
+	SmuMetrics_t metrics;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = renoir_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
+
+	switch (sensor) {
+	case AMDGPU_PP_SENSOR_GPU_LOAD:
+		*value = metrics.AverageGfxActivity / 100;
+		break;
+	default:
+		pr_err("Invalid sensor for retrieving clock activity\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+
+	uint32_t  pplib_workload = 0;
+
+	switch (profile) {
+	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+		break;
+	case PP_SMC_POWER_PROFILE_CUSTOM:
+		pplib_workload = WORKLOAD_PPLIB_COUNT;
+		break;
+	case PP_SMC_POWER_PROFILE_VIDEO:
+		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+		break;
+	case PP_SMC_POWER_PROFILE_VR:
+		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+		break;
+	case PP_SMC_POWER_PROFILE_COMPUTE:
+		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+					 enum amd_dpm_forced_level level,
+					 uint32_t *sclk_mask,
+					 uint32_t *mclk_mask,
+					 uint32_t *soc_mask)
+{
+
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+		if (sclk_mask)
+			*sclk_mask = 0;
+	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+		if (mclk_mask)
+			*mclk_mask = 0;
+	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+		if(sclk_mask)
+			/* sclk here is gfxclk, which exposes three levels: min, current and max */
+			*sclk_mask = 3 - 1;
+
+		if(mclk_mask)
+			*mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+		if(soc_mask)
+			*soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+	}
+
+	return 0;
+}
+
+/**
+ * This interface gets the DPM clock table for DC
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+	DpmClocks_t *table = smu->smu_table.clocks_table;
+	int i;
+
+	if (!clock_table || !table)
+		return -EINVAL;
+
+	for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+		clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+		clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+		clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+		clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+		clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+		clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+	}
+
+	for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+		clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+	}
+
+	return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+				   enum smu_clk_type clk_type, uint32_t mask)
+{
+
+	int ret = 0;
+	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+	soft_min_level = mask ? (ffs(mask) - 1) : 0;
+	soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+	switch (clk_type) {
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		if (soft_min_level > 2 || soft_max_level > 2) {
+			pr_info("Currently sclk only supports 3 levels on APU\n");
+			return -EINVAL;
+		}
+
+		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+					soft_max_level == 0 ? min_freq :
+					soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+					soft_min_level == 2 ? max_freq :
+					soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+		if (ret)
+			return ret;
+		break;
+	case SMU_SOCCLK:
+		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+		if (ret)
+			return ret;
+		break;
+	case SMU_MCLK:
+	case SMU_FCLK:
+		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+		if (ret)
+			return ret;
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+	int workload_type, ret;
+	uint32_t profile_mode = input[size];
+
+	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+		pr_err("Invalid power profile mode %d\n", profile_mode);
+		return -EINVAL;
+	}
+
+	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+	workload_type = smu_workload_get_type(smu, profile_mode);
+	if (workload_type < 0) {
+		pr_err("Unsupported power profile mode %d on RENOIR\n", profile_mode);
+		return -EINVAL;
+	}
+
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+				    1 << workload_type);
+	if (ret) {
+		pr_err("Failed to set workload type %d\n", workload_type);
+		return ret;
+	}
+
+	smu->power_profile_mode = profile_mode;
+
+	return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+	int ret = 0;
+	uint32_t sclk_freq = 0, uclk_freq = 0;
+
+	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+	if (ret)
+		return ret;
+
+	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+	if (ret)
+		return ret;
+
+	ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+	if (ret)
+		return ret;
+
+	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = renoir_set_peak_clock_by_device(smu);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
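renoir_set_power_profile_mode above translates the UI profile into a PPLIB workload bit and hands the SMU a one-hot mask, 1 << workload_type, via SetWorkloadMask. A small sketch of that conversion; the bit positions below are placeholders, the real values come from the PPLIB workload defines:

#include <stdint.h>

enum profile { PROFILE_FULLSCREEN3D, PROFILE_VIDEO, PROFILE_VR, PROFILE_COMPUTE };

/* illustrative bit positions, not the driver's actual values */
static int workload_bit(enum profile p)
{
	switch (p) {
	case PROFILE_FULLSCREEN3D:	return 1;
	case PROFILE_VIDEO:		return 3;
	case PROFILE_VR:		return 4;
	case PROFILE_COMPUTE:		return 5;
	default:			return -1;	/* unsupported profile */
	}
}

static uint32_t workload_mask(enum profile p)
{
	int bit = workload_bit(p);
	return bit < 0 ? 0 : 1u << bit;	/* one-hot mask sent to the SMU */
}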
+/* save watermark settings into pplib smu structure,
+ * also pass data to smu controller
+ */
+static int renoir_set_watermarks_table(
+		struct smu_context *smu,
+		void *watermarks,
+		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+	int i;
+	int ret = 0;
+	Watermarks_t *table = watermarks;
+
+	if (!table || !clock_ranges)
+		return -EINVAL;
+
+	if (clock_ranges->num_wm_dmif_sets > 4 ||
+			clock_ranges->num_wm_mcif_sets > 4)
+		return -EINVAL;
+
+	/* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+	for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+		table->WatermarkRow[WM_DCFCLK][i].MinClock =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+		table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+		table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+		table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+		table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+				clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+	}
+
+	for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+		table->WatermarkRow[WM_SOCCLK][i].MinClock =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+		table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+		table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+		table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+			cpu_to_le16((uint16_t)
+			(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+		table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+				clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+	}
+
+	/* pass data to smu controller */
+	ret = smu_write_watermarks_table(smu);
+
+	return ret;
+}
+
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+					   char *buf)
+{
+	static const char *profile_name[] = {
+					"BOOTUP_DEFAULT",
+					"3D_FULL_SCREEN",
+					"POWER_SAVING",
+					"VIDEO",
+					"VR",
+					"COMPUTE",
+					"CUSTOM"};
+	uint32_t i, size = 0;
+	int16_t workload_type = 0;
+
+	if (!smu->pm_enabled || !buf)
+		return -EINVAL;
+
+	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+		/*
+		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
+		 * Not all profile modes are supported on Renoir.
+		 */
+		workload_type = smu_workload_get_type(smu, i);
+		if (workload_type < 0)
+			continue;
+
+		size += sprintf(buf + size, "%2d %14s%s\n",
+			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
"*" : " "); + } + + return size; +} + +static int renoir_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + if (!data || !size) + return -EINVAL; + + mutex_lock(&smu->sensor_lock); + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_TEMP: + ret = renoir_get_gpu_temperature(smu, (uint32_t *)data); + *size = 4; + break; + default: + ret = smu_v12_0_read_sensor(smu, sensor, data, size); + } + mutex_unlock(&smu->sensor_lock); + + return ret; +} + static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, + .get_smu_clk_index = renoir_get_smu_clk_index, .get_smu_table_index = renoir_get_smu_table_index, .tables_init = renoir_tables_init, .set_power_state = NULL, - .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited, + .get_dpm_clk_limited = renoir_get_dpm_clk_limited, .print_clk_levels = renoir_print_clk_levels, + .get_current_power_state = renoir_get_current_power_state, + .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable, + .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable, + .get_current_clk_freq_by_table = renoir_get_current_clk_freq_by_table, + .force_dpm_limit_value = renoir_force_dpm_limit_value, + .unforce_dpm_levels = renoir_unforce_dpm_levels, + .get_workload_type = renoir_get_workload_type, + .get_profiling_clk_mask = renoir_get_profiling_clk_mask, + .force_clk_levels = renoir_force_clk_levels, + .set_power_profile_mode = renoir_set_power_profile_mode, + .set_performance_level = renoir_set_performance_level, + .get_dpm_clock_table = renoir_get_dpm_clock_table, + .set_watermarks_table = renoir_set_watermarks_table, + .get_power_profile_mode = renoir_get_power_profile_mode, + .read_sensor = renoir_read_sensor, + .check_fw_status = smu_v12_0_check_fw_status, + .check_fw_version = smu_v12_0_check_fw_version, + .powergate_sdma = smu_v12_0_powergate_sdma, + .powergate_vcn = smu_v12_0_powergate_vcn, + .powergate_jpeg = smu_v12_0_powergate_jpeg, + .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, + .read_smc_arg = smu_v12_0_read_arg, + .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, + .gfx_off_control = smu_v12_0_gfx_off_control, + .init_smc_tables = smu_v12_0_init_smc_tables, + .fini_smc_tables = smu_v12_0_fini_smc_tables, + .populate_smc_tables = smu_v12_0_populate_smc_tables, + .get_enabled_mask = smu_v12_0_get_enabled_mask, + .get_current_clk_freq = smu_v12_0_get_current_clk_freq, + .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, + .mode2_reset = smu_v12_0_mode2_reset, + .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, }; void renoir_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &renoir_ppt_funcs; smu->smc_if_version = SMU12_DRIVER_IF_VERSION; - smu_table->table_count = TABLE_COUNT; + smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h new file mode 100644 index 000000000000..60ce1fccaeb5 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -0,0 +1,208 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_INTERNAL_H__
+#define __SMU_INTERNAL_H__
+
+#include "amdgpu_smu.h"
+
+#define smu_init_microcode(smu) \
+	((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+	((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+	((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+	((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+	((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0)
+
+#define smu_setup_pptable(smu) \
+	((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+	((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+	((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+#define smu_powergate_jpeg(smu, gate) \
+	((smu)->ppt_funcs->powergate_jpeg ? (smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0)
+
+#define smu_get_vbios_bootup_values(smu) \
+	((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+	((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+	((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+	((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+	((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0)
+#define smu_check_fw_version(smu) \
+	((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+	((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+	((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+	((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+	((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_gfx_off_control(smu, enable) \
+	((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0)
+
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+	((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+	((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+	((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_default_od_settings(smu, initialize) \
+	((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+
+int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+	((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+	((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+	((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display_count(smu, count) \
+	((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+	((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+	((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+	((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
+#define smu_notify_display_change(smu) \
+	((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+	((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+	((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+	((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+	((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+	((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+	((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+	((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+
+#define smu_tables_init(smu, tab) \
+	((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+#define smu_set_thermal_fan_table(smu) \
+	((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
+#define smu_start_thermal_control(smu) \
+	((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+	((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0)
+
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+	((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+
+#define smu_pre_display_config_changed(smu) \
+	((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+	((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+	((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+	((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+	((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+	((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+	((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+	((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_clk_get_index(smu, msg) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_feature_get_index(smu, msg) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_table_get_index(smu, tab) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
+#define smu_power_get_index(smu, src) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
+#define smu_workload_get_type(smu, profile) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
+#define smu_run_btc(smu) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+
+
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+	((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+
+#define smu_get_dal_power_level(smu, clocks) \
+	((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+	((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+	((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+
+#define smu_dpm_set_uvd_enable(smu, enable) \
+	((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+	((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_dpm_set_jpeg_enable(smu, enable) \
+	((smu)->ppt_funcs->dpm_set_jpeg_enable ? (smu)->ppt_funcs->dpm_set_jpeg_enable((smu), (enable)) : 0)
+
+#define smu_set_watermarks_table(smu, tab, clock_ranges) \
+	((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+	((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
+#define smu_thermal_temperature_range_update(smu, range, rw) \
+	((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
+#define smu_get_thermal_temperature_range(smu, range) \
+	((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+#define smu_register_irq_handler(smu) \
+	((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0)
+
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+	((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+
+#define smu_asic_set_performance_level(smu, level) \
+	((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+#define smu_dump_pptable(smu) \
+	((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+	((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+	((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+	((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
+
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+	((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
+#endif
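Every wrapper above is the same guarded indirect call: invoke the ppt_funcs hook if the ASIC-specific table provides one, otherwise fall back to a default, 0 for optional hooks and -EINVAL for mandatory ones. Written out as plain functions, the pattern the macros expand to looks roughly like this (struct and function names are illustrative):

#include <errno.h>

struct ppt_funcs_sketch {
	int (*run_btc)(void *smu);			/* optional hook */
	int (*set_performance_level)(void *smu, int lvl);	/* mandatory hook */
};

static int call_run_btc(const struct ppt_funcs_sketch *f, void *smu)
{
	return f->run_btc ? f->run_btc(smu) : 0;	/* optional: no-op default */
}

static int call_set_performance_level(const struct ppt_funcs_sketch *f,
				      void *smu, int lvl)
{
	/* mandatory: a missing implementation is an error */
	return f->set_performance_level ? f->set_performance_level(smu, lvl)
					: -EINVAL;
}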
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c5257ae3188a..7781d245f8ef 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,17 +24,20 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+#define SMU_11_0_PARTIAL_PPTABLE
+
 #include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
+#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
 #include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
 #include "soc15_common.h"
 #include "atom.h"
-#include "vega20_ppt.h"
-#include "arcturus_ppt.h"
-#include "navi10_ppt.h"
+#include "amd_pcie.h"
+#include "amdgpu_ras.h"
 
 #include "asic_reg/thm/thm_11_0_2_offset.h"
 #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -61,7 +64,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
 	return 0;
 }
 
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 
@@ -77,47 +80,20 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
 	for (i = 0; i < timeout; i++) {
 		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
 		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
-			break;
+			return cur_value == 0x1 ? 0 : -EIO;
+
 		udelay(1);
 	}
 
 	/* timeout means wrong logic */
-	if (i == timeout)
-		return -ETIME;
-
-	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
-}
-
-static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
-{
-	struct amdgpu_device *adev = smu->adev;
-	int ret = 0, index = 0;
-
-	index = smu_msg_get_index(smu, msg);
-	if (index < 0)
-		return index;
-
-	smu_v11_0_wait_for_response(smu);
-
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
-
-	ret = smu_v11_0_wait_for_response(smu);
-
-	if (ret)
-		pr_err("failed send message: %10s (%d) response %#x\n",
-		       smu_get_message_name(smu, msg), index, ret);
-
-	return ret;
-
+	return -ETIME;
 }
 
-static int
-smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu,
+			      enum smu_message_type msg,
 			      uint32_t param)
 {
-
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;
 
@@ -126,9 +102,11 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
 		return index;
 
 	ret = smu_v11_0_wait_for_response(smu);
-	if (ret)
-		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
-		       smu_get_message_name(smu, msg), index, param, ret);
+	if (ret) {
+		pr_err("Msg issuing pre-check failed and "
+		       "SMU may not be in the right state!\n");
+		return ret;
+	}
 
 	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
@@ -144,7 +122,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
 	return ret;
 }
 
-static int smu_v11_0_init_microcode(struct smu_context *smu)
+int smu_v11_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	const char *chip_name;
@@ -206,7 +184,7 @@ out:
 	return err;
 }
 
-static int smu_v11_0_load_microcode(struct smu_context *smu)
+int smu_v11_0_load_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	const uint32_t *src;
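The reworked smu_v11_0_wait_for_response above now returns as soon as the response register goes non-zero, treating 0x1 as success and anything else as an I/O error, with -ETIME reserved for a genuine timeout. A host-side sketch of that polling handshake, with the register read abstracted behind a callback:

#include <errno.h>
#include <stdint.h>

#define SMU_RESP_OK	0x1	/* value the firmware writes on success */

/* read_resp() stands in for reading the MP1 C2PMSG_90 response register */
static int wait_for_response(uint32_t (*read_resp)(void), int timeout_us)
{
	for (int i = 0; i < timeout_us; i++) {
		uint32_t v = read_resp();

		if (v)	/* firmware has responded */
			return v == SMU_RESP_OK ? 0 : -EIO;
		/* the real driver udelay(1)s here between polls */
	}
	return -ETIME;	/* no response at all: firmware wedged or wrong state */
}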
@@ -244,7 +222,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_check_fw_status(struct smu_context *smu)
+int smu_v11_0_check_fw_status(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	uint32_t mp1_fw_flags;
@@ -259,7 +237,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
 	return -EIO;
 }
 
-static int smu_v11_0_check_fw_version(struct smu_context *smu)
+int smu_v11_0_check_fw_version(struct smu_context *smu)
 {
 	uint32_t if_version = 0xff, smu_version = 0xff;
 	uint16_t smu_major;
@@ -354,7 +332,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
 	return 0;
 }
 
-static int smu_v11_0_setup_pptable(struct smu_context *smu)
+int smu_v11_0_setup_pptable(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	const struct smc_firmware_header_v1_0 *hdr;
@@ -369,6 +347,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
 	version_major = le16_to_cpu(hdr->header.header_version_major);
 	version_minor = le16_to_cpu(hdr->header.header_version_minor);
 	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+		pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
 		switch (version_minor) {
 		case 0:
 			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -385,6 +364,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
 			return ret;
 
 	} else {
+		pr_info("use vbios provided pptable\n");
 		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 						    powerplayinfo);
 
@@ -433,13 +413,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+int smu_v11_0_init_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = NULL;
 	int ret = 0;
 
-	if (smu_table->tables || smu_table->table_count == 0)
+	if (smu_table->tables)
 		return -EINVAL;
 
 	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -460,18 +440,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	int ret = 0;
 
-	if (!smu_table->tables || smu_table->table_count == 0)
+	if (!smu_table->tables)
 		return -EINVAL;
 
 	kfree(smu_table->tables);
 	kfree(smu_table->metrics_table);
 	smu_table->tables = NULL;
-	smu_table->table_count = 0;
 	smu_table->metrics_table = NULL;
 	smu_table->metrics_time = 0;
 
@@ -481,7 +460,7 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_init_power(struct smu_context *smu)
+int smu_v11_0_init_power(struct smu_context *smu)
 {
 	struct smu_power_context *smu_power = &smu->smu_power;
 
@@ -499,7 +478,7 @@ static int smu_v11_0_init_power(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_fini_power(struct smu_context *smu)
+int smu_v11_0_fini_power(struct smu_context *smu)
 {
 	struct smu_power_context *smu_power = &smu->smu_power;
 
@@ -576,7 +555,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
 {
 	int ret, index;
 	struct amdgpu_device *adev = smu->adev;
@@ -673,7 +652,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
 	return 0;
 }
 
-static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *memory_pool = &smu_table->memory_pool;
smu_table *memory_pool = &smu_table->memory_pool; @@ -719,7 +698,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) return ret; } -static int smu_v11_0_check_pptable(struct smu_context *smu) +int smu_v11_0_check_pptable(struct smu_context *smu) { int ret; @@ -727,7 +706,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_parse_pptable(struct smu_context *smu) +int smu_v11_0_parse_pptable(struct smu_context *smu) { int ret; @@ -751,7 +730,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_populate_smc_pptable(struct smu_context *smu) +int smu_v11_0_populate_smc_pptable(struct smu_context *smu) { int ret; @@ -760,7 +739,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_write_pptable(struct smu_context *smu) +int smu_v11_0_write_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; int ret = 0; @@ -771,24 +750,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_write_watermarks_table(struct smu_context *smu) -{ - int ret = 0; - struct smu_table_context *smu_table = &smu->smu_table; - struct smu_table *table = NULL; - - table = &smu_table->tables[SMU_TABLE_WATERMARKS]; - - if (!table->cpu_addr) - return -EINVAL; - - ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, - true); - - return ret; -} - -static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) +int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) { int ret; @@ -800,7 +762,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl return ret; } -static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) +int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -809,11 +771,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) if (!table_context) return -EINVAL; - return smu_set_deep_sleep_dcefclk(smu, - table_context->boot_values.dcefclk / 100); + return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100); } -static int smu_v11_0_set_tool_table_location(struct smu_context *smu) +int smu_v11_0_set_tool_table_location(struct smu_context *smu) { int ret = 0; struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; @@ -831,7 +792,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu) return ret; } -static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) { int ret = 0; @@ -843,7 +804,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) } -static int smu_v11_0_set_allowed_mask(struct smu_context *smu) +int smu_v11_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; int ret = 0; @@ -870,7 +831,7 @@ failed: return ret; } -static int smu_v11_0_get_enabled_mask(struct smu_context *smu, +int smu_v11_0_get_enabled_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) { uint32_t feature_mask_high = 0, feature_mask_low = 0; @@ -899,7 +860,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu, return ret; } -static int smu_v11_0_system_features_control(struct smu_context *smu, +int 
smu_v11_0_system_features_control(struct smu_context *smu, bool en) { struct smu_feature *feature = &smu->smu_feature; @@ -925,7 +886,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu, return ret; } -static int smu_v11_0_notify_display_change(struct smu_context *smu) +int smu_v11_0_notify_display_change(struct smu_context *smu) { int ret = 0; @@ -983,7 +944,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, return ret; } -static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) { struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks; int ret = 0; @@ -1063,13 +1024,44 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) return 0; } -static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) +uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) { + uint32_t od_limit, max_power_limit; + struct smu_11_0_powerplay_table *powerplay_table = NULL; + struct smu_table_context *table_context = &smu->smu_table; + powerplay_table = table_context->power_play_table; + + max_power_limit = smu_get_pptable_power_limit(smu); + + if (!max_power_limit) { + // If we couldn't get the table limit, fall back on first-read value + if (!smu->default_power_limit) + smu->default_power_limit = smu->power_limit; + max_power_limit = smu->default_power_limit; + } + + if (smu->od_enabled) { + od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); + + pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit); + + max_power_limit *= (100 + od_limit); + max_power_limit /= 100; + } + + return max_power_limit; +} + +int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) { int ret = 0; + uint32_t max_power_limit; + + max_power_limit = smu_v11_0_get_max_power_limit(smu); - if (n > smu->default_power_limit) { - pr_err("New power limit is over the max allowed %d\n", - smu->default_power_limit); + if (n > max_power_limit) { + pr_err("New power limit (%d) is over the max allowed %d\n", + n, + max_power_limit); return -EINVAL; } @@ -1091,7 +1083,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return 0; } -static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, +int smu_v11_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value) { @@ -1170,7 +1162,7 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu) return 0; } -static int smu_v11_0_start_thermal_control(struct smu_context *smu) +int smu_v11_0_start_thermal_control(struct smu_context *smu) { int ret = 0; struct smu_temperature_range range; @@ -1212,6 +1204,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } +int smu_v11_0_stop_thermal_control(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); + + return 0; +} + static uint16_t convert_to_vddc(uint8_t vid) { return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); @@ -1236,7 +1237,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) } -static int smu_v11_0_read_sensor(struct smu_context *smu, +int smu_v11_0_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { @@ -1273,7 +1274,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu, return ret; } -static int +int 
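A worked example of the new overdrive scaling in smu_v11_0_get_max_power_limit(): with a pptable limit of 250 W and SMU_11_0_ODSETTING_POWERPERCENTAGE of 20, the ceiling becomes 250 * (100 + 20) / 100 = 300 W, so smu_v11_0_set_power_limit() now rejects only requests above 300 W rather than anything above the stock limit.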
smu_v11_0_display_clock_voltage_request(struct smu_context *smu, struct pp_display_clock_request *clock_req) @@ -1316,9 +1317,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, if (clk_select == SMU_UCLK && smu->disable_uclk_switch) return 0; - mutex_lock(&smu->mutex); ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0); - mutex_unlock(&smu->mutex); if(clk_select == SMU_UCLK) smu->hard_min_uclk_req_from_dal = clk_freq; @@ -1328,27 +1327,7 @@ failed: return ret; } -static int -smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct - dm_pp_wm_sets_with_clock_ranges_soc15 - *clock_ranges) -{ - int ret = 0; - struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; - void *table = watermarks->cpu_addr; - - if (!smu->disable_watermark && - smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && - smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { - smu_set_watermarks_table(smu, table, clock_ranges); - smu->watermarks_bitmap |= WATERMARKS_EXIST; - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; - } - - return ret; -} - -static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) { int ret = 0; struct amdgpu_device *adev = smu->adev; @@ -1361,12 +1340,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case CHIP_NAVI12: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; - mutex_lock(&smu->mutex); if (enable) ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); else ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); - mutex_unlock(&smu->mutex); break; default: break; @@ -1375,7 +1352,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static uint32_t +uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) @@ -1415,7 +1392,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) return 0; } -static int +int smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; @@ -1444,7 +1421,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); } -static int +int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) { @@ -1472,7 +1449,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, return ret; } -static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; @@ -1482,10 +1459,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - mutex_lock(&(smu->mutex)); ret = smu_v11_0_auto_fan_control(smu, 0); if (ret) - goto set_fan_speed_rpm_failed; + return ret; crystal_clock_freq = amdgpu_asic_get_xclk(adev); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); @@ -1496,23 +1472,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); -set_fan_speed_rpm_failed: - mutex_unlock(&(smu->mutex)); return ret; } -#define XGMI_STATE_D0 1 -#define XGMI_STATE_D3 0 - -static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { int ret = 0; - mutex_lock(&(smu->mutex)); ret = smu_send_smc_msg_with_param(smu, 
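The mutex_lock()/mutex_unlock() pairs deleted throughout these helpers are not dropped outright: with the smu_internal.h split, serialization presumably moves up into the common entry points that now wrap the ppt_funcs table. A sketch of the assumed wrapper shape (not the actual amdgpu_smu.c code):

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
	mutex_unlock(&smu->mutex);

	return ret;
}

This would also explain why the low-level helpers can now call each other directly (as set_min_dcef_deep_sleep does above) without self-deadlocking.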
SMU_MSG_SetXgmiMode, - pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); - mutex_unlock(&(smu->mutex)); + pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); return ret; } @@ -1559,7 +1528,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = .process = smu_v11_0_irq_process, }; -static int smu_v11_0_register_irq_handler(struct smu_context *smu) +int smu_v11_0_register_irq_handler(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct amdgpu_irq_src *irq_src = smu->irq_source; @@ -1591,7 +1560,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu) return ret; } -static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks) { struct smu_table_context *table_context = &smu->smu_table; @@ -1621,13 +1590,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, return 0; } -static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) { int ret = 0; - mutex_lock(&smu->mutex); ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); - mutex_unlock(&smu->mutex); return ret; } @@ -1637,7 +1604,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); } -static bool smu_v11_0_baco_is_support(struct smu_context *smu) +bool smu_v11_0_baco_is_support(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -1651,7 +1618,9 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu) if (!baco_support) return false; - if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) + /* Arcturus does not support this bit mask */ + if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && + !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) return false; val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); @@ -1661,7 +1630,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu) return false; } -static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; enum smu_baco_state baco_state; @@ -1673,10 +1642,14 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) return baco_state; } -static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + uint32_t bif_doorbell_intr_cntl; + uint32_t data; int ret = 0; if (smu_v11_0_baco_get_state(smu) == state) @@ -1684,10 +1657,37 @@ static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state mutex_lock(&smu_baco->mutex); - if (state == SMU_BACO_STATE_ENTER) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO); - else + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + + if (state == SMU_BACO_STATE_ENTER) { + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + if (!ras || !ras->supported) { + data = RREG32_SOC15(THM, 0, 
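To make the RPM path concrete: assuming amdgpu_asic_get_xclk() reports the reference clock in 10 kHz units (so 10000 for a 100 MHz crystal), a 3000 RPM request yields tach_period = 60 * 10000 * 10000 / (8 * 3000) = 250,000 reference ticks per tachometer interval, the divisor of 8 matching the tach edges counted per revolution in this encoding. The unit reading is an assumption from the formula; what the hunk itself changes is that a failed auto-fan-control disable now returns directly instead of unwinding a lock.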
mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0); + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1); + } + } else { ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + if (ret) + goto out; + + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, 0); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + /* clear vbios scratch 6 and 7 for coming asic reinit */ + WREG32(adev->bios_scratch_reg_offset + 6, 0); + WREG32(adev->bios_scratch_reg_offset + 7, 0); + } if (ret) goto out; @@ -1697,13 +1697,17 @@ out: return ret; } -static int smu_v11_0_baco_reset(struct smu_context *smu) +int smu_v11_0_baco_enter(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; int ret = 0; - ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); - if (ret) - return ret; + /* Arcturus does not need this audio workaround */ + if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); + if (ret) + return ret; + } ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); if (ret) @@ -1711,6 +1715,13 @@ static int smu_v11_0_baco_reset(struct smu_context *smu) msleep(10); + return ret; +} + +int smu_v11_0_baco_exit(struct smu_context *smu) +{ + int ret = 0; + ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); if (ret) return ret; @@ -1718,13 +1729,12 @@ static int smu_v11_0_baco_reset(struct smu_context *smu) return ret; } -static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { int ret = 0, clk_id = 0; uint32_t param = 0; - mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); if (clk_id < 0) { ret = -EINVAL; @@ -1751,80 +1761,102 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk } failed: - mutex_unlock(&smu->mutex); return ret; } -static const struct smu_funcs smu_v11_0_funcs = { - .init_microcode = smu_v11_0_init_microcode, - .load_microcode = smu_v11_0_load_microcode, - .check_fw_status = smu_v11_0_check_fw_status, - .check_fw_version = smu_v11_0_check_fw_version, - .send_smc_msg = smu_v11_0_send_msg, - .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, - .setup_pptable = smu_v11_0_setup_pptable, - .init_smc_tables = smu_v11_0_init_smc_tables, - .fini_smc_tables = smu_v11_0_fini_smc_tables, - .init_power = smu_v11_0_init_power, - .fini_power = smu_v11_0_fini_power, - .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, - .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, - .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, - .check_pptable = smu_v11_0_check_pptable, - .parse_pptable = smu_v11_0_parse_pptable, - .populate_smc_tables = smu_v11_0_populate_smc_pptable, - .write_pptable = smu_v11_0_write_pptable, - .write_watermarks_table = smu_v11_0_write_watermarks_table, - .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, - .set_tool_table_location = smu_v11_0_set_tool_table_location, - .init_display_count = smu_v11_0_init_display_count, - .set_allowed_mask = smu_v11_0_set_allowed_mask, - .get_enabled_mask = smu_v11_0_get_enabled_mask, - .system_features_control = smu_v11_0_system_features_control, - .notify_display_change = 
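With baco_reset() split into enter/exit halves, a reset path can hold the asic in BACO while it does other work between the two calls. A minimal sketch of a caller (helper name hypothetical):

static int gpu_reset_via_baco(struct smu_context *smu)
{
	int ret;

	/* ArmD3 sequence (skipped on Arcturus) + EnterBaco, then 10 ms settle */
	ret = smu_v11_0_baco_enter(smu);
	if (ret)
		return ret;

	/* ... the device is now in BACO; reset-specific work goes here ... */

	/* ExitBaco, doorbell interrupt re-enable, vbios scratch 6/7 clear */
	return smu_v11_0_baco_exit(smu);
}

Note that entry takes the RAS state into account: without RAS support the driver arms THM_BACO_CNTL itself and sends EnterBaco with argument 0, otherwise it sends EnterBaco with argument 1.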
smu_v11_0_notify_display_change, - .set_power_limit = smu_v11_0_set_power_limit, - .get_current_clk_freq = smu_v11_0_get_current_clk_freq, - .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, - .start_thermal_control = smu_v11_0_start_thermal_control, - .read_sensor = smu_v11_0_read_sensor, - .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, - .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, - .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_fan_control_mode = smu_v11_0_get_fan_control_mode, - .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, - .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, - .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, - .gfx_off_control = smu_v11_0_gfx_off_control, - .register_irq_handler = smu_v11_0_register_irq_handler, - .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, - .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, - .baco_reset = smu_v11_0_baco_reset, - .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, -}; +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) +{ + int ret = 0, clk_id = 0; + uint32_t param; -void smu_v11_0_set_smu_funcs(struct smu_context *smu) + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return clk_id; + + if (max > 0) { + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, + param); + if (ret) + return ret; + } + + if (min > 0) { + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, + param); + if (ret) + return ret; + } + + return ret; +} + +int smu_v11_0_override_pcie_parameters(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + uint32_t pcie_gen = 0, pcie_width = 0; + int ret; - smu->funcs = &smu_v11_0_funcs; - switch (adev->asic_type) { - case CHIP_VEGA20: - vega20_set_ppt_funcs(smu); - break; - case CHIP_ARCTURUS: - arcturus_set_ppt_funcs(smu); - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - navi10_set_ppt_funcs(smu); - break; - default: - pr_warn("Unknown asic for smu11\n"); + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + /* Bit 31:16: LCLK DPM level. 
0 is DPM0, and 1 is DPM1 + * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 + * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 + */ + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); + + if (ret) + pr_err("[%s] Attempt to override pcie params failed!\n", __func__); + + return ret; + +} + +int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size) +{ + struct smu_table_context *table_context = &smu->smu_table; + int ret = 0; + + if (initialize) { + if (table_context->overdrive_table) { + return -EINVAL; + } + table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL); + if (!table_context->overdrive_table) { + return -ENOMEM; + } + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); + if (ret) { + pr_err("Failed to export overdrive table!\n"); + return ret; + } } + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); + if (ret) { + pr_err("Failed to import overdrive table!\n"); + return ret; + } + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 9d2280ca1f4b..2ac7f2f231b6 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -24,12 +24,12 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v12_0.h" #include "soc15_common.h" #include "atom.h" -#include "renoir_ppt.h" #include "asic_reg/mp/mp_12_0_0_offset.h" #include "asic_reg/mp/mp_12_0_0_sh_mask.h" @@ -41,7 +41,7 @@ #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 -static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, +int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; @@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) +int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) return 0; } -static int smu_v12_0_wait_for_response(struct smu_context *smu) +int smu_v12_0_wait_for_response(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t cur_value, i; @@ -66,44 +66,18 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu) for (i = 0; i < adev->usec_timeout; i++) { cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0) - break; + return cur_value == 0x1 ? 0 : -EIO; + udelay(1); } /* timeout means wrong logic */ - if (i == adev->usec_timeout) - return -ETIME; - - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 
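To illustrate the cap mapping above: on a Gen3 x16 platform, pcie_gen_mask sets the GEN3 support bit but not GEN4, giving pcie_gen = 2, and pcie_mlw_mask selects X16, giving pcie_width = 6; the ppt-level update_pcie_parameters() then clamps every DPM link level to those caps (see the vega20 implementation later in this patch for the per-level packing).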
0 : -EIO; -} - -static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) -{ - struct amdgpu_device *adev = smu->adev; - int ret = 0, index = 0; - - index = smu_msg_get_index(smu, msg); - if (index < 0) - return index; - - smu_v12_0_wait_for_response(smu); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index); - - ret = smu_v12_0_wait_for_response(smu); - - if (ret) - pr_err("Failed to send message 0x%x, response 0x%x\n", index, - ret); - - return ret; - + return -ETIME; } -static int -smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, +int +smu_v12_0_send_msg_with_param(struct smu_context *smu, + enum smu_message_type msg, uint32_t param) { struct amdgpu_device *adev = smu->adev; @@ -114,9 +88,11 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, return index; ret = smu_v12_0_wait_for_response(smu); - if (ret) - pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n", - index, ret, param); + if (ret) { + pr_err("Msg issuing pre-check failed and " + "SMU may be not in the right state!\n"); + return ret; + } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -132,7 +108,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, return ret; } -static int smu_v12_0_check_fw_status(struct smu_context *smu) +int smu_v12_0_check_fw_status(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; @@ -147,7 +123,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu) return -EIO; } -static int smu_v12_0_check_fw_version(struct smu_context *smu) +int smu_v12_0_check_fw_version(struct smu_context *smu) { uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; @@ -181,7 +157,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu) return ret; } -static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) { if (!(smu->adev->flags & AMD_IS_APU)) return 0; @@ -192,7 +168,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma); } -static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) { if (!(smu->adev->flags & AMD_IS_APU)) return 0; @@ -203,7 +179,18 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); } -static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) +int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) +{ + if (!(smu->adev->flags & AMD_IS_APU)) + return 0; + + if (gate) + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + else + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); +} + +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) { if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) return 0; @@ -212,6 +199,39 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) SMU_MSG_SetGfxCGPG, enable ? 
1 : 0); } +int smu_v12_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + if(!data || !size) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_MIN_FAN_RPM: + *(uint32_t *)data = 0; + *size = 4; + break; + default: + ret = smu_common_read_sensor(smu, sensor, data, size); + break; + } + + if (ret) + *size = 0; + + return ret; +} + /** * smu_v12_0_get_gfxoff_status - get gfxoff status * @@ -224,7 +244,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) * Returns 2=Not in GFXOFF. * Returns 3=Transition into GFXOFF. */ -static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) { uint32_t reg; uint32_t gfxOff_Status = 0; @@ -237,22 +257,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) return gfxOff_Status; } -static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) { int ret = 0, timeout = 500; if (enable) { ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); - /* confirm gfx is back to "off" state, timeout is 5 seconds */ - while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) { - msleep(10); - timeout--; - if (timeout == 0) { - DRM_ERROR("enable gfxoff timeout and failed!\n"); - break; - } - } } else { ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); @@ -270,12 +281,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v12_0_init_smc_tables(struct smu_context *smu) +int smu_v12_0_init_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = NULL; - if (smu_table->tables || smu_table->table_count == 0) + if (smu_table->tables) return -EINVAL; tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table), @@ -288,11 +299,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu) return smu_tables_init(smu, tables); } -static int smu_v12_0_fini_smc_tables(struct smu_context *smu) +int smu_v12_0_fini_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - if (!smu_table->tables || smu_table->table_count == 0) + if (!smu_table->tables) return -EINVAL; kfree(smu_table->clocks_table); @@ -304,7 +315,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu) return 0; } -static int smu_v12_0_populate_smc_tables(struct smu_context *smu) +int smu_v12_0_populate_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = NULL; @@ -319,14 +330,69 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu) return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } -static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max) +int smu_v12_0_get_enabled_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) { + uint32_t feature_mask_high = 0, feature_mask_low = 0; int ret = 0; - mutex_lock(&smu->mutex); + if (!feature_mask || num < 2) + return -EINVAL; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); + if (ret) + 
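The new v12 sensor hook follows the amd_pp convention of a caller-supplied buffer and in/out size. A usage sketch:

	uint32_t sclk, size = sizeof(sclk);
	int ret;

	ret = smu_v12_0_read_sensor(smu, AMDGPU_PP_SENSOR_GFX_SCLK, &sclk, &size);
	if (!ret)	/* size is now 4 and sclk holds the current gfx clock */
		pr_debug("gfx clk: %u\n", sclk);

Anything the switch does not handle locally falls through to smu_common_read_sensor(), and on error *size is zeroed so stale data is never consumed.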
return ret; + ret = smu_read_smc_arg(smu, &feature_mask_high); + if (ret) + return ret; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); + if (ret) + return ret; + ret = smu_read_smc_arg(smu, &feature_mask_low); + if (ret) + return ret; + + feature_mask[0] = feature_mask_low; + feature_mask[1] = feature_mask_high; + + return ret; +} + +int smu_v12_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value) +{ + int ret = 0; + uint32_t freq = 0; + + if (clk_id >= SMU_CLK_COUNT || !value) + return -EINVAL; + + ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); + if (ret) + return ret; + + freq *= 100; + *value = freq; + + return ret; +} + +int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + int ret = 0; + uint32_t mclk_mask, soc_mask; if (max) { + ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, + NULL, + &mclk_mask, + &soc_mask); + if (ret) + goto failed; + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -340,14 +406,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk goto failed; break; case SMU_UCLK: - ret = smu_get_dpm_uclk_limited(smu, max, true); + case SMU_FCLK: + case SMU_MCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); + if (ret) + goto failed; + break; + case SMU_SOCCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max); if (ret) goto failed; break; default: ret = -EINVAL; goto failed; - } } @@ -365,7 +437,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk goto failed; break; case SMU_UCLK: - ret = smu_get_dpm_uclk_limited(smu, min, false); + case SMU_FCLK: + case SMU_MCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min); + if (ret) + goto failed; + break; + case SMU_SOCCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min); if (ret) goto failed; break; @@ -373,40 +452,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ret = -EINVAL; goto failed; } - } failed: - mutex_unlock(&smu->mutex); return ret; } -static const struct smu_funcs smu_v12_0_funcs = { - .check_fw_status = smu_v12_0_check_fw_status, - .check_fw_version = smu_v12_0_check_fw_version, - .powergate_sdma = smu_v12_0_powergate_sdma, - .powergate_vcn = smu_v12_0_powergate_vcn, - .send_smc_msg = smu_v12_0_send_msg, - .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, - .read_smc_arg = smu_v12_0_read_arg, - .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, - .gfx_off_control = smu_v12_0_gfx_off_control, - .init_smc_tables = smu_v12_0_init_smc_tables, - .fini_smc_tables = smu_v12_0_fini_smc_tables, - .populate_smc_tables = smu_v12_0_populate_smc_tables, - .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, -}; - -void smu_v12_0_set_smu_funcs(struct smu_context *smu) +int smu_v12_0_mode2_reset(struct smu_context *smu){ + return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2); +} + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) { - struct amdgpu_device *adev = smu->adev; + int ret = 0; - smu->funcs = &smu_v12_0_funcs; + if (max < min) + return -EINVAL; - switch (adev->asic_type) { - case CHIP_RENOIR: - renoir_set_ppt_funcs(smu); - break; + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min); + if (ret) + return ret; + + ret 
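The enabled-feature mask is 64 bits wide but transported as two 32-bit halves, with feature_mask[0] holding the low word and feature_mask[1] the high word. A caller testing one feature bit might reassemble it like this (sketch; feature_bit is a placeholder):

	uint32_t mask[2] = { 0 };
	uint64_t features;
	int ret;

	ret = smu_v12_0_get_enabled_mask(smu, mask, 2);
	if (ret)
		return ret;

	features = ((uint64_t)mask[1] << 32) | mask[0];
	if (features & (1ULL << feature_bit))
		/* the feature is enabled */;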
= smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max); + if (ret) + return ret; + break; + case SMU_FCLK: + case SMU_MCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max); + if (ret) + return ret; + break; + case SMU_SOCCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max); + if (ret) + return ret; + break; + case SMU_VCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max); + if (ret) + return ret; + break; default: - pr_warn("Unknown asic for smu12\n"); + return -EINVAL; } + + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 15590fd86ef4..868e2d5f6e62 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -653,8 +653,8 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); - uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; - uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + uint16_t HiSidd; + uint16_t LoSidd; struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index da025b1d302d..32ebb383c456 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -940,7 +940,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, { int result; /* PP_Clocks minClocks; */ - uint32_t threshold, mvdd; + uint32_t mvdd; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -973,8 +973,6 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, level->VoltageDownHyst = 0; level->PowerThrottle = 0; - threshold = clock * data->fast_watermark_threshold / 100; - data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) @@ -1501,7 +1499,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, uint32_t dram_timing; uint32_t dram_timing2; uint32_t burstTime; - ULONG state, trrds, trrdl; + ULONG trrds, trrdl; int result; result = atomctrl_set_engine_dram_timings_rv770(hwmgr, @@ -1513,7 +1511,6 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); - state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0); trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0); trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 
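Unlike the v11 variant, which packs (clk_id << 16) | freq into a single SetSoftMin/MaxByFreq argument, the v12 APU path sends a dedicated hard-min/soft-max message pair per clock domain. A typical call clamping the gfx clock (frequencies presumably in MHz, judging from the message names):

	/* sends SetHardMinGfxClk(200), then SetSoftMaxGfxClk(1400) */
	ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, 200, 1400);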
3f12cf341511..aa0ee2b46135 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index 4728aa23a818..7dca04a89217 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr) uint32_t tmp; int ret = 0; struct cgs_firmware_info info = {0}; - struct smu8_smumgr *smu8_smu; if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - smu8_smu = hwmgr->smu_backend; ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c index 742b3dc1f6cb..adfbcbe5d113 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -61,15 +61,29 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr) uint32_t reg; uint32_t ret; - reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + /* Due to the L1 policy problem under SRIOV, we have to use + * mmMP1_SMN_C2PMSG_103 as the driver response register + */ + if (hwmgr->pp_one_vf) { + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103); - ret = phm_wait_for_register_unequal(hwmgr, reg, - 0, MP1_C2PMSG_90__CONTENT_MASK); + ret = phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_103__CONTENT_MASK); - if (ret) - pr_err("No response from smu\n"); + if (ret) + pr_err("No response from smu\n"); - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103); + } else { + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + + ret = phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + if (ret) + pr_err("No response from smu\n"); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); + } } /* @@ -83,7 +97,11 @@ static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, { struct amdgpu_device *adev = hwmgr->adev; - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); + if (hwmgr->pp_one_vf) { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg); + } else { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); + } return 0; } @@ -101,7 +119,10 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) smu9_wait_for_response(hwmgr); - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + if (hwmgr->pp_one_vf) + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); + else + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); smu9_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -127,9 +148,17 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, smu9_wait_for_response(hwmgr); - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + /* Due to the L1 policy problem under SRIOV, we have to use + * mmMP1_SMN_C2PMSG_101 as the driver message register and + * mmMP1_SMN_C2PMSG_102 as the driver parameter register. 
+ */ + if (hwmgr->pp_one_vf) { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter); + } else { + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + } smu9_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -144,5 +173,8 @@ uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); + if (hwmgr->pp_one_vf) + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102); + else + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 0dbdde69f2d9..39427ca32a15 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -71,6 +71,12 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, { struct vega10_smumgr *priv = hwmgr->smu_backend; + /* under sriov, vbios or hypervisor driver + * has already copy table to smc so here only skip it + */ + if (!hwmgr->not_vf) + return 0; + PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, @@ -100,6 +106,14 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, int msg = enable ? PPSMC_MSG_EnableSmuFeatures : PPSMC_MSG_DisableSmuFeatures; + /* VF has no permission to change smu feature due + * to security concern even under pp one vf mode + * it still can't do it. For vega10, the smu in + * vbios will enable the appropriate features. 
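In short: when a single VF owns the powerplay interface (hwmgr->pp_one_vf), the guest must not touch the host-owned mailbox, so the whole handshake shifts to the shadow set: message C2PMSG_66 -> C2PMSG_101, parameter C2PMSG_82 -> C2PMSG_102, response C2PMSG_90 -> C2PMSG_103. The open-coded branches above are equivalent to a small selector, shown here for the response register (sketch, helper name hypothetical):

static uint32_t smu9_resp_reg(struct pp_hwmgr *hwmgr)
{
	return hwmgr->pp_one_vf ?
		SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103) :
		SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}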
+ * */ + if (!hwmgr->not_vf) + return 0; + return smum_send_msg_to_smc_with_parameter(hwmgr, msg, feature_mask); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index f9589806bf83..90c782c132d2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, return -EINVAL); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index b9089c6bea85..f604612f411f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index ae18fbcb26fb..b0e0d67cd54b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -1114,7 +1114,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, (struct phm_ppt_v1_information *)(hwmgr->pptable); SMIO_Pattern vol_level; uint32_t mvdd; - uint16_t us_mvdd; table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; @@ -1168,17 +1167,6 @@ static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, "in Clock Dependency Table", ); - us_mvdd = 0; - if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || - (data->mclk_dpm_key_disabled)) - us_mvdd = data->vbios_boot_state.mvdd_bootup_value; - else { - if (!vegam_populate_mvdd_value(hwmgr, - data->dpm_table.mclk_table.dpm_levels[0].value, - &vol_level)) - us_mvdd = vol_level.Voltage; - } - if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level)) table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); else @@ -1383,11 +1371,16 @@ static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr, result = phm_find_boot_level(&(data->dpm_table.sclk_table), data->vbios_boot_state.sclk_bootup_value, (uint32_t *)&(table->GraphicsBootLevel)); + if (result) + return result; result = phm_find_boot_level(&(data->dpm_table.mclk_table), data->vbios_boot_state.mclk_bootup_value, (uint32_t *)&(table->MemoryBootLevel)); + if (result) + return result; + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; table->BootVddci = data->vbios_boot_state.vddci_bootup_value * @@ -1493,7 +1486,7 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); - uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + uint8_t i, stretch_amount, volt_offset = 0; 
struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1532,11 +1525,9 @@ static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5; /* Populate CKS Lookup Table */ - if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) - stretch_amount2 = 0; - else if (stretch_amount == 3 || stretch_amount == 4) - stretch_amount2 = 1; - else { + if (!(stretch_amount == 1 || stretch_amount == 2 || + stretch_amount == 5 || stretch_amount == 3 || + stretch_amount == 4)) { phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher); PP_ASSERT_WITH_CODE(false, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bbd8ebd58434..12bcc3e3ba99 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PrepareMp1ForShutdown), MSG_MAP(SetMGpuFanBoostLimitRpm), MSG_MAP(GetAVFSVoltageByDpm), + MSG_MAP(DFCstateControl), }; static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = { @@ -219,7 +221,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; @@ -464,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) sizeof(PPTable_t)); table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; - table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); return 0; } @@ -634,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) !smu_dpm_ctx->dpm_current_power_state) return -EINVAL; - mutex_lock(&(smu->mutex)); switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { case SMU_STATE_UI_LABEL_BATTERY: pm_type = POWER_STATE_TYPE_BATTERY; @@ -652,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) pm_type = POWER_STATE_TYPE_DEFAULT; break; } - mutex_unlock(&(smu->mutex)); return pm_type; } @@ -1274,16 +1273,8 @@ static int vega20_force_clk_levels(struct smu_context *smu, struct vega20_dpm_table *dpm_table; struct vega20_single_dpm_table *single_dpm_table; uint32_t soft_min_level, soft_max_level, hard_min_level; - struct smu_dpm_context *smu_dpm = &smu->smu_dpm; int ret = 0; - if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { - pr_info("force clock level is for dpm manual mode only.\n"); - return -EINVAL; - } - - mutex_lock(&(smu->mutex)); - soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? 
(fls(mask) - 1) : 0; @@ -1436,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; } - mutex_unlock(&(smu->mutex)); return ret; } @@ -1451,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, dpm_table = smu_dpm->dpm_context; - mutex_lock(&smu->mutex); - switch (clk_type) { case SMU_GFXCLK: single_dpm_table = &(dpm_table->gfx_table); @@ -1474,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, ret = -EINVAL; } - mutex_unlock(&smu->mutex); return ret; } @@ -2260,7 +2247,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; - if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) { + if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) { if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, @@ -2547,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu, int feature_enabled; PPCLK_e clk_id; - mutex_lock(&(smu->mutex)); - dpm_table = smu_dpm->dpm_context; golden_table = smu_dpm->golden_dpm_context; @@ -2598,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu, } ret = smu_handle_task(smu, smu_dpm->dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE); + AMD_PP_TASK_READJUST_POWER_STATE, + false); set_od_failed: - mutex_unlock(&(smu->mutex)); - return ret; } @@ -2827,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, } if (type == PP_OD_COMMIT_DPM_TABLE) { - mutex_lock(&(smu->mutex)); ret = smu_handle_task(smu, smu_dpm->dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE); - mutex_unlock(&(smu->mutex)); + AMD_PP_TASK_READJUST_POWER_STATE, + false); } return ret; @@ -3047,7 +3030,7 @@ static int vega20_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -3141,6 +3124,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu, return 0; } +static int vega20_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + uint32_t smu_version; + int ret; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version!\n"); + return ret; + } + + /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ + if (smu_version < 0x283200) { + pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); + return -EINVAL; + } + + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); +} + +static int vega20_update_pcie_parameters(struct smu_context *smu, + uint32_t pcie_gen_cap, + uint32_t pcie_width_cap) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + int ret, i; + uint32_t smu_pcie_arg; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | + ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : + (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? 
+ pptable->PcieLaneCount[i] : pcie_width_cap); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg); + } + + return ret; +} + + static const struct pptable_funcs vega20_ppt_funcs = { .tables_init = vega20_tables_init, .alloc_dpm_context = vega20_allocate_dpm_context, @@ -3153,7 +3179,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_smu_table_index = vega20_get_smu_table_index, .get_smu_power_index = vega20_get_pwr_src_index, .get_workload_type = vega20_get_workload_type, - .run_afll_btc = vega20_run_btc_afll, + .run_btc = vega20_run_btc_afll, .get_allowed_feature_mask = vega20_get_allowed_feature_mask, .get_current_power_state = vega20_get_current_power_state, .set_default_dpm_table = vega20_set_default_dpm_table, @@ -3183,13 +3209,61 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_fan_speed_percent = vega20_get_fan_speed_percent, .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, - .get_thermal_temperature_range = vega20_get_thermal_temperature_range + .get_thermal_temperature_range = vega20_get_thermal_temperature_range, + .set_df_cstate = vega20_set_df_cstate, + .update_pcie_parameters = vega20_update_pcie_parameters, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= smu_v11_0_baco_is_support, + 
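Two encodings in this hunk are worth spelling out. The SMC firmware version is packed byte-wise, so the 40.50 cutoff reads as 0x283200 (0x28 = 40, 0x32 = 50, patch 0). And each OverridePcieParameters argument packs (link_level << 16) | (gen << 8) | lane_code, matching the bit layout documented in smu_v11_0_override_pcie_parameters(): link level 1 capped at Gen4/x16, for instance, is (1 << 16) | (3 << 8) | 6 = 0x00010306.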
.baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_enter = smu_v11_0_baco_enter, + .baco_exit = smu_v11_0_baco_exit, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, }; void vega20_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &vega20_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index dfaddbb7da0d..8ae1e1f97a73 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -20,9 +20,10 @@ #define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1)) -static struct simplefb_format supported_formats[] = { - { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, - { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, +static const u32 arc_pgu_supported_formats[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, }; static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc) @@ -30,22 +31,24 @@ static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc) struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); const struct drm_framebuffer *fb = crtc->primary->state->fb; uint32_t pixel_format = fb->format->format; - struct simplefb_format *format = NULL; + u32 format = DRM_FORMAT_INVALID; int i; + u32 reg_ctrl; - for (i = 0; i < ARRAY_SIZE(supported_formats); i++) { - if (supported_formats[i].fourcc == pixel_format) - format = &supported_formats[i]; + for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) { + if (arc_pgu_supported_formats[i] == pixel_format) + format = arc_pgu_supported_formats[i]; } - if (WARN_ON(!format)) + if (WARN_ON(format == DRM_FORMAT_INVALID)) return; - if (format->fourcc == DRM_FORMAT_RGB888) - arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, - arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) | - ARCPGU_MODE_RGB888_MASK); - + reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL); + if (format == DRM_FORMAT_RGB565) + reg_ctrl &= ~ARCPGU_MODE_XRGB8888; + else + reg_ctrl |= ARCPGU_MODE_XRGB8888; + arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl); } static const struct drm_crtc_funcs arc_pgu_crtc_funcs = { @@ -193,18 +196,15 @@ static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm) { struct arcpgu_drm_private *arcpgu = drm->dev_private; struct drm_plane *plane = NULL; - u32 formats[ARRAY_SIZE(supported_formats)], i; int ret; plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); if (!plane) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(supported_formats); i++) - formats[i] = supported_formats[i].fourcc; - ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs, - formats, ARRAY_SIZE(formats), + arc_pgu_supported_formats, + ARRAY_SIZE(arc_pgu_supported_formats), NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 6b7f791685ec..d6a6692db0ac 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <linux/dma-mapping.h> #include <linux/module.h> @@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm) { struct platform_device 
*pdev = to_platform_device(drm->dev); struct arcpgu_drm_private *arcpgu; - struct device_node *encoder_node; + struct device_node *encoder_node = NULL, *endpoint_node = NULL; struct resource *res; int ret; @@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm) if (arc_pgu_setup_crtc(drm) < 0) return -ENODEV; - /* find the encoder node and initialize it */ - encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); + /* + * There is only one output port inside each device. It is linked with + * encoder endpoint. + */ + endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (endpoint_node) { + encoder_node = of_graph_get_remote_port_parent(endpoint_node); + of_node_put(endpoint_node); + } + if (encoder_node) { ret = arcpgu_drm_hdmi_init(drm, encoder_node); of_node_put(encoder_node); if (ret < 0) return ret; } else { + dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n"); ret = arcpgu_drm_sim_init(drm, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h index dab2c380f7f3..b689a382d556 100644 --- a/drivers/gpu/drm/arc/arcpgu_regs.h +++ b/drivers/gpu/drm/arc/arcpgu_regs.h @@ -25,7 +25,7 @@ #define ARCPGU_CTRL_VS_POL_OFST 0x3 #define ARCPGU_CTRL_HS_POL_MASK 0x1 #define ARCPGU_CTRL_HS_POL_OFST 0x4 -#define ARCPGU_MODE_RGB888_MASK 0x04 +#define ARCPGU_MODE_XRGB8888 BIT(2) #define ARCPGU_STAT_BUSY_MASK 0x02 #endif diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 9a7dcf92591a..442d4656150a 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -84,7 +84,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, 0); + drm_atomic_helper_commit_planes(dev, old_state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_modeset_enables(dev, old_state); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index 0930234abb9d..8f32ae7c25d0 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -581,8 +581,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter, } if (!in_range(&splitter->vsize, dflow->in_h)) { - DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n", - dflow->in_w); + DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n", + dflow->in_h); return -EINVAL; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 9917ce0d86a0..56f55c53abfd 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -735,13 +735,13 @@ static int anx6345_i2c_probe(struct i2c_client *client, /* Map slave addresses of ANX6345 */ for (i = 0; i < I2C_NUM_ADDRESSES; i++) { if (anx6345_i2c_addresses[i] >> 1 != client->addr) - anx6345->i2c_clients[i] = i2c_new_dummy(client->adapter, + anx6345->i2c_clients[i] = i2c_new_dummy_device(client->adapter, anx6345_i2c_addresses[i] >> 1); else anx6345->i2c_clients[i] = client; - if (!anx6345->i2c_clients[i]) { - err = -ENOMEM; + if (IS_ERR(anx6345->i2c_clients[i])) { + err = PTR_ERR(anx6345->i2c_clients[i]); DRM_ERROR("Failed to reserve I2C bus %02x\n", 
anx6345_i2c_addresses[i]); goto err_unregister_i2c; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index 20f4f92dd866..d7e65c869415 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -160,12 +160,23 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component, return -EINVAL; } +static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct dw_hdmi_i2s_audio_data *audio = data; + struct dw_hdmi *hdmi = audio->hdmi; + + return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev); +} + static struct hdmi_codec_ops dw_hdmi_i2s_ops = { .hw_params = dw_hdmi_i2s_hw_params, .audio_startup = dw_hdmi_i2s_audio_startup, .audio_shutdown = dw_hdmi_i2s_audio_shutdown, .get_eld = dw_hdmi_i2s_get_eld, .get_dai_id = dw_hdmi_i2s_get_dai_id, + .hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb, }; static int snd_dw_hdmi_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index dbe38a54870b..67fca439bbfb 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -194,6 +194,10 @@ struct dw_hdmi { struct mutex cec_notifier_mutex; struct cec_notifier *cec_notifier; + + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + enum drm_connector_status last_connector_result; }; #define HDMI_IH_PHY_STAT0_RX_SENSE \ @@ -218,6 +222,28 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset) return val; } +static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged) +{ + if (hdmi->plugged_cb && hdmi->codec_dev) + hdmi->plugged_cb(hdmi->codec_dev, plugged); +} + +int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + bool plugged; + + mutex_lock(&hdmi->mutex); + hdmi->plugged_cb = fn; + hdmi->codec_dev = codec_dev; + plugged = hdmi->last_connector_result == connector_status_connected; + handle_plugged_change(hdmi, plugged); + mutex_unlock(&hdmi->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb); + static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg) { regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data); @@ -2229,6 +2255,7 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, connector); + enum drm_connector_status result; mutex_lock(&hdmi->mutex); hdmi->force = DRM_FORCE_UNSPECIFIED; @@ -2236,7 +2263,18 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force) dw_hdmi_update_phy_mask(hdmi); mutex_unlock(&hdmi->mutex); - return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); + result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); + + mutex_lock(&hdmi->mutex); + if (result != hdmi->last_connector_result) { + dev_dbg(hdmi->dev, "read_hpd result: %d", result); + handle_plugged_change(hdmi, + result == connector_status_connected); + hdmi->last_connector_result = result; + } + mutex_unlock(&hdmi->mutex); + + return result; } static int dw_hdmi_connector_get_modes(struct drm_connector *connector) @@ -2731,6 +2769,7 @@ __dw_hdmi_probe(struct platform_device *pdev, hdmi->rxsense = true; hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE); hdmi->mc_clkdis = 0x7f; + hdmi->last_connector_result = connector_status_disconnected; 
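/*
 * [Editor's note, not part of the patch: a minimal sketch of how the new
 * plugged-callback plumbing is consumed. An audio codec registers through
 * the hdmi_codec_ops hook_plugged_cb, which dw_hdmi_i2s_hook_plugged_cb
 * forwards to dw_hdmi_set_plugged_cb(); the callback then fires whenever
 * dw_hdmi_connector_detect() observes a connector state change. The
 * callback body and names below are illustrative assumptions.]
 */
static void example_plugged_cb(struct device *codec_dev, bool plugged)
{
	/* e.g. report jack state to userspace or gate audio streaming */
	dev_dbg(codec_dev, "HDMI %s\n", plugged ? "plugged" : "unplugged");
}

/* from the codec side, once the dw_hdmi instance is known: */
ret = dw_hdmi_set_plugged_cb(hdmi, example_plugged_cb, codec_dev);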
mutex_init(&hdmi->mutex); mutex_init(&hdmi->audio_mutex); diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index aa3198dc9903..6f6d6d1e60ae 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -285,8 +285,8 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi) else dvi->connector_type = DRM_MODE_CONNECTOR_DVID; - dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode, - "hpd-gpios", 0, GPIOD_IN, "hpd"); + dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode, + "hpd", 0, GPIOD_IN, "hpd"); if (IS_ERR(dvi->hpd)) { ret = PTR_ERR(dvi->hpd); dvi->hpd = NULL; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 57201e36b8b7..4511c2e07bb9 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1582,8 +1582,11 @@ static void commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; const struct drm_mode_config_helper_funcs *funcs; + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; ktime_t start; s64 commit_time_ms; + unsigned int i, new_self_refresh_mask = 0; funcs = dev->mode_config.helper_private; @@ -1603,6 +1606,15 @@ static void commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_wait_for_dependencies(old_state); + /* + * We cannot safely access new_crtc_state after + * drm_atomic_helper_commit_hw_done() so figure out which crtc's have + * self-refresh active beforehand: + */ + for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) + if (new_crtc_state->self_refresh_active) + new_self_refresh_mask |= BIT(i); + if (funcs && funcs->atomic_commit_tail) funcs->atomic_commit_tail(old_state); else @@ -1611,7 +1623,8 @@ static void commit_tail(struct drm_atomic_state *old_state) commit_time_ms = ktime_ms_delta(ktime_get(), start); if (commit_time_ms > 0) drm_self_refresh_helper_update_avg_times(old_state, - (unsigned long)commit_time_ms); + (unsigned long)commit_time_ms, + new_self_refresh_mask); drm_atomic_helper_commit_cleanup_done(old_state); diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index d0a937fb0c56..7cf3cf936547 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -58,6 +58,22 @@ */ /** + * __drm_atomic_helper_crtc_state_reset - reset the CRTC state + * @crtc_state: atomic CRTC state, must not be NULL + * @crtc: CRTC object, must not be NULL + * + * Initializes the newly allocated @crtc_state with default + * values. This is useful for drivers that subclass the CRTC state. 
+ */ +void +__drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *crtc_state, + struct drm_crtc *crtc) +{ + crtc_state->crtc = crtc; +} +EXPORT_SYMBOL(__drm_atomic_helper_crtc_state_reset); + +/** * __drm_atomic_helper_crtc_reset - reset state on CRTC * @crtc: drm CRTC * @crtc_state: CRTC state to assign @@ -74,7 +90,7 @@ __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { if (crtc_state) - crtc_state->crtc = crtc; + __drm_atomic_helper_crtc_state_reset(crtc_state, crtc); crtc->state = crtc_state; } @@ -212,23 +228,43 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state); /** - * __drm_atomic_helper_plane_reset - resets planes state to default values + * __drm_atomic_helper_plane_state_reset - resets plane state to default values + * @plane_state: atomic plane state, must not be NULL * @plane: plane object, must not be NULL - * @state: atomic plane state, must not be NULL * - * Initializes plane state to default. This is useful for drivers that subclass - * the plane state. + * Initializes the newly allocated @plane_state with default + * values. This is useful for drivers that subclass the plane state. */ -void __drm_atomic_helper_plane_reset(struct drm_plane *plane, - struct drm_plane_state *state) +void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, + struct drm_plane *plane) { - state->plane = plane; - state->rotation = DRM_MODE_ROTATE_0; + plane_state->plane = plane; + plane_state->rotation = DRM_MODE_ROTATE_0; - state->alpha = DRM_BLEND_ALPHA_OPAQUE; - state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; + plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE; + plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI; +} +EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset); - plane->state = state; +/** + * __drm_atomic_helper_plane_reset - reset state on plane + * @plane: drm plane + * @plane_state: plane state to assign + * + * Initializes the newly allocated @plane_state and assigns it to + * the &drm_plane->state pointer of @plane, usually required when + * initializing the drivers or when called from the &drm_plane_funcs.reset + * hook. + * + * This is useful for drivers that subclass the plane state. + */ +void __drm_atomic_helper_plane_reset(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + if (plane_state) + __drm_atomic_helper_plane_state_reset(plane_state, plane); + + plane->state = plane_state; } EXPORT_SYMBOL(__drm_atomic_helper_plane_reset); @@ -336,6 +372,22 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); /** + * __drm_atomic_helper_connector_state_reset - reset the connector state + * @conn_state: atomic connector state, must not be NULL + * @connector: connector object, must not be NULL + * + * Initializes the newly allocated @conn_state with default + * values. This is useful for drivers that subclass the connector state. 
+ */ +void +__drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state, + struct drm_connector *connector) +{ + conn_state->connector = connector; +} +EXPORT_SYMBOL(__drm_atomic_helper_connector_state_reset); + +/** * __drm_atomic_helper_connector_reset - reset state on connector * @connector: drm connector * @conn_state: connector state to assign @@ -352,7 +404,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector, struct drm_connector_state *conn_state) { if (conn_state) - conn_state->connector = connector; + __drm_atomic_helper_connector_state_reset(conn_state, connector); connector->state = conn_state; } diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 0965632008a9..2166000ed057 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -712,7 +712,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter) __drm_connector_put_safe(iter->conn); spin_unlock_irqrestore(&config->connector_list_lock, flags); } - lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); + lock_release(&connector_list_iter_dep_map, _RET_IP_); } EXPORT_SYMBOL(drm_connector_list_iter_end); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 1770754bcd4a..e68d23043973 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -28,6 +28,13 @@ #include <linux/sched.h> #include <linux/seq_file.h> +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stacktrace.h> +#include <linux/sort.h> +#include <linux/timekeeping.h> +#include <linux/math64.h> +#endif + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> @@ -45,6 +52,12 @@ * protocol. The helpers contain a topology manager and bandwidth manager. * The helpers encapsulate the sending and received of sideband msgs. 
*/ +struct drm_dp_pending_up_req { + struct drm_dp_sideband_msg_hdr hdr; + struct drm_dp_sideband_msg_req_body msg; + struct list_head next; +}; + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf); @@ -61,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, @@ -1411,39 +1424,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) } EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); -static void drm_dp_destroy_mst_branch_device(struct kref *kref) +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + +#define STACK_DEPTH 8 + +static noinline void +__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_ref_history *history, + enum drm_dp_mst_topology_ref_type type) { - struct drm_dp_mst_branch *mstb = - container_of(kref, struct drm_dp_mst_branch, topology_kref); - struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; - struct drm_dp_mst_port *port, *tmp; - bool wake_tx = false; + struct drm_dp_mst_topology_ref_entry *entry = NULL; + depot_stack_handle_t backtrace; + ulong stack_entries[STACK_DEPTH]; + uint n; + int i; - mutex_lock(&mgr->lock); - list_for_each_entry_safe(port, tmp, &mstb->ports, next) { - list_del(&port->next); - drm_dp_mst_topology_put_port(port); + n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1); + backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL); + if (!backtrace) + return; + + /* Try to find an existing entry for this backtrace */ + for (i = 0; i < history->len; i++) { + if (history->entries[i].backtrace == backtrace) { + entry = &history->entries[i]; + break; + } } - mutex_unlock(&mgr->lock); - /* drop any tx slots msg */ - mutex_lock(&mstb->mgr->qlock); - if (mstb->tx_slots[0]) { - mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[0] = NULL; - wake_tx = true; + /* Otherwise add one */ + if (!entry) { + struct drm_dp_mst_topology_ref_entry *new; + int new_len = history->len + 1; + + new = krealloc(history->entries, sizeof(*new) * new_len, + GFP_KERNEL); + if (!new) + return; + + entry = &new[history->len]; + history->len = new_len; + history->entries = new; + + entry->backtrace = backtrace; + entry->type = type; + entry->count = 0; } - if (mstb->tx_slots[1]) { - mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[1] = NULL; - wake_tx = true; + entry->count++; + entry->ts_nsec = ktime_get_ns(); +} + +static int +topology_ref_history_cmp(const void *a, const void *b) +{ + const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b; + + if (entry_a->ts_nsec > entry_b->ts_nsec) + return 1; + else if (entry_a->ts_nsec < entry_b->ts_nsec) + return -1; + else + return 0; +} + +static inline const char * +topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type) +{ + if (type == DRM_DP_MST_TOPOLOGY_REF_GET) + return "get"; + else + return "put"; +} + +static void +__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, + void *ptr, const char *type_str) +{ + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + int i; + + if (!buf) + return; + + if (!history->len) + goto out; + 
+ /* First, sort the list so that it goes from oldest to newest + * reference entry + */ + sort(history->entries, history->len, sizeof(*history->entries), + topology_ref_history_cmp, NULL); + + drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", + type_str, ptr); + + for (i = 0; i < history->len; i++) { + const struct drm_dp_mst_topology_ref_entry *entry = + &history->entries[i]; + ulong *entries; + uint nr_entries; + u64 ts_nsec = entry->ts_nsec; + u32 rem_nsec = do_div(ts_nsec, 1000000000); + + nr_entries = stack_depot_fetch(entry->backtrace, &entries); + stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + + drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s", + entry->count, + topology_ref_type_to_str(entry->type), + ts_nsec, rem_nsec / 1000, buf); } - mutex_unlock(&mstb->mgr->qlock); - if (wake_tx) - wake_up_all(&mstb->mgr->tx_waitq); + /* Now free the history, since this is the only time we expose it */ + kfree(history->entries); +out: + kfree(buf); +} - drm_dp_mst_put_mstb_malloc(mstb); +static __always_inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) +{ + __dump_topology_ref_history(&mstb->topology_ref_history, mstb, + "MSTB"); +} + +static __always_inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) +{ + __dump_topology_ref_history(&port->topology_ref_history, port, + "Port"); +} + +static __always_inline void +save_mstb_topology_ref(struct drm_dp_mst_branch *mstb, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); +} + +static __always_inline void +save_port_topology_ref(struct drm_dp_mst_port *port, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(port->mgr, &port->topology_ref_history, type); +} + +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->topology_ref_history_lock); +} + +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_unlock(&mgr->topology_ref_history_lock); +} +#else +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {} +static inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {} +#define save_mstb_topology_ref(mstb, type) +#define save_port_topology_ref(port, type) +#endif + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + + drm_dp_mst_dump_mstb_topology_history(mstb); + + INIT_LIST_HEAD(&mstb->destroy_next); + + /* + * This can get called under mgr->mutex, so we need to perform the + * actual destruction of the mstb in another worker + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1471,11 +1639,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) { - int ret = kref_get_unless_zero(&mstb->topology_kref); + int ret; - if (ret) - DRM_DEBUG("mstb %p (%d)\n", mstb, - kref_read(&mstb->topology_kref)); + 
topology_ref_history_lock(mstb->mgr); + ret = kref_get_unless_zero(&mstb->topology_kref); + if (ret) { + DRM_DEBUG("mstb %p (%d)\n", + mstb, kref_read(&mstb->topology_kref)); + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); + } + + topology_ref_history_unlock(mstb->mgr); return ret; } @@ -1496,9 +1670,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) */ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); WARN_ON(kref_read(&mstb->topology_kref) == 0); kref_get(&mstb->topology_kref); DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); + + topology_ref_history_unlock(mstb->mgr); } /** @@ -1516,27 +1695,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); - kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); -} + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); -static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) -{ - struct drm_dp_mst_branch *mstb; - - switch (old_pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mstb = port->mstb; - port->mstb = NULL; - drm_dp_mst_topology_put_mstb(mstb); - break; - } + topology_ref_history_unlock(mstb->mgr); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); } static void drm_dp_destroy_port(struct kref *kref) @@ -1545,31 +1711,24 @@ static void drm_dp_destroy_port(struct kref *kref) container_of(kref, struct drm_dp_mst_port, topology_kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; - if (!port->input) { - kfree(port->cached_edid); + drm_dp_mst_dump_port_topology_history(port); - /* - * The only time we don't have a connector - * on an output port is if the connector init - * fails. 
- */ - if (port->connector) { - /* we can't destroy the connector here, as - * we might be holding the mode_config.mutex - * from an EDID retrieval */ - - mutex_lock(&mgr->destroy_connector_lock); - list_add(&port->next, &mgr->destroy_connector_list); - mutex_unlock(&mgr->destroy_connector_lock); - schedule_work(&mgr->destroy_connector_work); - return; - } - /* no need to clean up vcpi - * as if we have no connector we never setup a vcpi */ - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + /* There's nothing that needs locking to destroy an input port yet */ + if (port->input) { + drm_dp_mst_put_port_malloc(port); + return; } - drm_dp_mst_put_port_malloc(port); + + kfree(port->cached_edid); + + /* + * we can't destroy the connector here, as we might be holding the + * mode_config.mutex from an EDID retrieval + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&port->next, &mgr->destroy_port_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1597,12 +1756,17 @@ static void drm_dp_destroy_port(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) { - int ret = kref_get_unless_zero(&port->topology_kref); + int ret; - if (ret) - DRM_DEBUG("port %p (%d)\n", port, - kref_read(&port->topology_kref)); + topology_ref_history_lock(port->mgr); + ret = kref_get_unless_zero(&port->topology_kref); + if (ret) { + DRM_DEBUG("port %p (%d)\n", + port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + } + topology_ref_history_unlock(port->mgr); return ret; } @@ -1621,9 +1785,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + WARN_ON(kref_read(&port->topology_kref) == 0); kref_get(&port->topology_kref); DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + + topology_ref_history_unlock(port->mgr); } /** @@ -1639,8 +1808,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); + + topology_ref_history_unlock(port->mgr); kref_put(&port->topology_kref, drm_dp_destroy_port); } @@ -1757,38 +1931,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -/* - * return sends link address for new mstb - */ -static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) { - int ret; - u8 rad[6], lct; - bool send_link = false; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + struct drm_dp_mst_branch *mstb; + u8 rad[8], lct; + int ret = 0; + + if (port->pdt == new_pdt) + return 0; + + /* Teardown the old pdt, if there is one */ + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* + * If the new PDT would also have an i2c bus, don't bother + * with reregistering it + */ + if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + new_pdt == DP_PEER_DEVICE_SST_SINK) { + port->pdt = new_pdt; + return 0; + } + + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case 
DP_PEER_DEVICE_MST_BRANCHING: + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + break; + } + + port->pdt = new_pdt; switch (port->pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); break; + case DP_PEER_DEVICE_MST_BRANCHING: lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", port); + goto out; + } - port->mstb = drm_dp_add_mst_branch_device(lct, rad); - if (port->mstb) { - port->mstb->mgr = port->mgr; - port->mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - send_link = true; - } + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); + + /* And make sure we send a link address for this */ + ret = 1; break; } - return send_link; + +out: + if (ret < 0) + port->pdt = DP_PEER_DEVICE_NONE; + return ret; } /** @@ -1921,44 +2136,130 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); static void +drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + char proppath[255]; + int ret; + + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = mgr->cbs->add_connector(mgr, port, proppath); + if (!port->connector) { + ret = -ENOMEM; + goto error; + } + + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { + port->cached_edid = drm_get_edid(port->connector, + &port->aux.ddc); + drm_connector_set_tile_property(port->connector); + } + + mgr->cbs->register_connector(port->connector); + return; + +error: + DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); +} + +/* + * Drop a topology reference, and unlink the port from the in-memory topology + * layout + */ +static void +drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + mutex_lock(&mgr->lock); + list_del(&port->next); + mutex_unlock(&mgr->lock); + drm_dp_mst_topology_put_port(port); +} + +static struct drm_dp_mst_port * +drm_dp_mst_add_port(struct drm_device *dev, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, u8 port_number) +{ + struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); + + if (!port) + return NULL; + + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); + port->parent = mstb; + port->port_num = port_number; + port->mgr = mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev->dev; + port->aux.is_remote = true; + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + + return port; +} + +static int drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_device *dev, struct drm_dp_link_addr_reply_port *port_msg) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port 
*port; - bool ret; - bool created = false; - int old_pdt = 0; - int old_ddps = 0; + int old_ddps = 0, ret; + u8 new_pdt = DP_PEER_DEVICE_NONE; + bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); if (!port) { - port = kzalloc(sizeof(*port), GFP_KERNEL); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); if (!port) - return; - kref_init(&port->topology_kref); - kref_init(&port->malloc_kref); - port->parent = mstb; - port->port_num = port_msg->port_number; - port->mgr = mstb->mgr; - port->aux.name = "DPMST"; - port->aux.dev = dev->dev; - port->aux.is_remote = true; - - /* - * Make sure the memory allocation for our parent branch stays - * around until our own memory allocation is released + return -ENOMEM; + created = true; + changed = true; + } else if (!port->input && port_msg->input_port && port->connector) { + /* Since port->connector can't be changed here, we create a + * new port if input_port changes from 0 to 1 */ - drm_dp_mst_get_mstb_malloc(mstb); - + drm_dp_mst_topology_unlink_port(mgr, port); + drm_dp_mst_topology_put_port(port); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); + if (!port) + return -ENOMEM; + changed = true; created = true; - } else { - old_pdt = port->pdt; + } else if (port->input && !port_msg->input_port) { + changed = true; + } else if (port->connector) { + /* We're updating a port that's exposed to userspace, so do it + * under lock + */ + drm_modeset_lock(&mgr->base.lock, NULL); + old_ddps = port->ddps; + changed = port->ddps != port_msg->ddps || + (port->ddps && + (port->ldps != port_msg->legacy_device_plug_status || + port->dpcd_rev != port_msg->dpcd_revision || + port->mcs != port_msg->mcs || + port->pdt != port_msg->peer_device_type || + port->num_sdp_stream_sinks != + port_msg->num_sdp_stream_sinks)); } - port->pdt = port_msg->peer_device_type; port->input = port_msg->input_port; + if (!port->input) + new_pdt = port_msg->peer_device_type; port->mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; @@ -1969,78 +2270,104 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, /* manage mstb port lists with mgr lock - take a reference for this list */ if (created) { - mutex_lock(&mstb->mgr->lock); + mutex_lock(&mgr->lock); drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); - mutex_unlock(&mstb->mgr->lock); + mutex_unlock(&mgr->lock); } if (old_ddps != port->ddps) { if (port->ddps) { if (!port->input) { - drm_dp_send_enum_path_resources(mstb->mgr, - mstb, port); + drm_dp_send_enum_path_resources(mgr, mstb, + port); } } else { port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - - ret = drm_dp_port_setup_pdt(port); - if (ret == true) - drm_dp_send_link_address(mstb->mgr, port->mstb); + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + send_link_addr = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT on port %p: %d\n", + port, ret); + goto fail; } - if (created && !port->input) { - char proppath[255]; - - build_mst_prop_path(mstb, port->port_num, proppath, - sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, - port, - proppath); - if (!port->connector) { - /* remove it from the port list */ - mutex_lock(&mstb->mgr->lock); - list_del(&port->next); - mutex_unlock(&mstb->mgr->lock); - /* drop port list reference */ - 
drm_dp_mst_topology_put_port(port); - goto out; - } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, - &port->aux.ddc); - drm_connector_set_tile_property(port->connector); - } - (*mstb->mgr->cbs->register_connector)(port->connector); + /* + * If this port wasn't just created, then we're reprobing because + * we're coming out of suspend. In this case, always resend the link + * address if there's an MSTB on this port + */ + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + send_link_addr = true; + + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (!port->input) + drm_dp_mst_port_add_connector(mstb, port); + + if (send_link_addr && port->mstb) { + ret = drm_dp_send_link_address(mgr, port->mstb); + if (ret == 1) /* MSTB below us changed */ + changed = true; + else if (ret < 0) + goto fail_put; } -out: /* put reference to this port */ drm_dp_mst_topology_put_port(port); + return changed; + +fail: + drm_dp_mst_topology_unlink_port(mgr, port); + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); +fail_put: + drm_dp_mst_topology_put_port(port); + return ret; } static void drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, struct drm_dp_connection_status_notify *conn_stat) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_pdt; - int old_ddps; - bool dowork = false; + int old_ddps, ret; + u8 new_pdt; + bool dowork = false, create_connector = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); if (!port) return; + if (port->connector) { + if (!port->input && conn_stat->input_port) { + /* + * We can't remove a connector from an already exposed + * port, so just throw the port out and make sure we + * reprobe the link address of its parent MSTB + */ + drm_dp_mst_topology_unlink_port(mgr, port); + mstb->link_address_sent = false; + dowork = true; + goto out; + } + + /* Locking is only needed if the port's exposed to userspace */ + drm_modeset_lock(&mgr->base.lock, NULL); + } else if (port->input && !conn_stat->input_port) { + create_connector = true; + /* Reprobe link address so we get num_sdp_streams */ + mstb->link_address_sent = false; + dowork = true; + } + old_ddps = port->ddps; - old_pdt = port->pdt; - port->pdt = conn_stat->peer_device_type; + port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2052,17 +2379,27 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - if (drm_dp_port_setup_pdt(port)) - dowork = true; + new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; + + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + dowork = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT for port %p: %d\n", + port, ret); + dowork = false; } + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (create_connector) + drm_dp_mst_port_add_connector(mstb, port); + +out: drm_dp_mst_topology_put_port(port); if (dowork) queue_work(system_long_wq, &mstb->mgr->work); - } static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, @@ -2148,42 +2485,63 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, return mstb; } -static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_port *port; - struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) - drm_dp_send_link_address(mgr, mstb); + int ret; + bool changed = false; + + if (!mstb->link_address_sent) { + ret = drm_dp_send_link_address(mgr, mstb); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; + } list_for_each_entry(port, &mstb->ports, next) { - if (port->input) - continue; + struct drm_dp_mst_branch *mstb_child = NULL; - if (!port->ddps) + if (port->input || !port->ddps) continue; - if (!port->available_pbn) + if (!port->available_pbn) { + drm_modeset_lock(&mgr->base.lock, NULL); drm_dp_send_enum_path_resources(mgr, mstb, port); + drm_modeset_unlock(&mgr->base.lock); + changed = true; + } - if (port->mstb) { + if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); - if (mstb_child) { - drm_dp_check_and_send_link_address(mgr, mstb_child); - drm_dp_mst_topology_put_mstb(mstb_child); - } + + if (mstb_child) { + ret = drm_dp_check_and_send_link_address(mgr, + mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; } } + + return changed; } static void drm_dp_mst_link_probe_work(struct work_struct *work) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_device *dev = mgr->dev; struct drm_dp_mst_branch *mstb; int ret; bool clear_payload_id_table; + mutex_lock(&mgr->probe_lock); + mutex_lock(&mgr->lock); clear_payload_id_table = !mgr->payload_id_table_cleared; mgr->payload_id_table_cleared = true; @@ -2195,8 +2553,10 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) mstb = NULL; } mutex_unlock(&mgr->lock); - if (!mstb) + if (!mstb) { + mutex_unlock(&mgr->probe_lock); return; + } /* * Certain branch devices seem to incorrectly report an available_pbn @@ -2211,8 +2571,12 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) drm_dp_send_clear_payload_id_table(mgr, mstb); } - drm_dp_check_and_send_link_address(mgr, mstb); + ret = drm_dp_check_and_send_link_address(mgr, mstb); drm_dp_mst_topology_put_mstb(mstb); + + mutex_unlock(&mgr->probe_lock); + if (ret) + drm_kms_helper_hotplug_event(dev); } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -2458,16 +2822,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) } } -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_send_link_address(struct 
drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_link_address_ack_reply *reply; - int i, len, ret; + struct drm_dp_mst_port *port, *tmp; + int i, len, ret, port_mask = 0; + bool changed = false; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return; + return -ENOMEM; txmsg->dst = mstb; len = build_link_address(txmsg); @@ -2493,16 +2859,39 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, drm_dp_check_mstb_guid(mstb, reply->guid); - for (i = 0; i < reply->nports; i++) - drm_dp_mst_handle_link_address_port(mstb, mgr->dev, - &reply->ports[i]); + for (i = 0; i < reply->nports; i++) { + port_mask |= BIT(reply->ports[i].port_number); + ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, + &reply->ports[i]); + if (ret == 1) + changed = true; + else if (ret < 0) + goto out; + } - drm_kms_helper_hotplug_event(mgr->dev); + /* Prune any ports that are currently a part of mstb in our in-memory + * topology, but were not seen in this link address. Usually this + * means that they were removed while the topology was out of sync, + * e.g. during suspend/resume + */ + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + if (port_mask & BIT(port->port_num)) + continue; + + DRM_DEBUG_KMS("port %d was not in link address, removing\n", + port->port_num); + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + changed = true; + } + mutex_unlock(&mgr->lock); out: if (ret <= 0) mstb->link_address_sent = false; kfree(txmsg); + return ret < 0 ? ret : changed; } void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, @@ -2844,9 +3233,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) drm_dp_mst_topology_put_port(port); } - for (i = 0; i < mgr->max_payloads; i++) { - if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) + for (i = 0; i < mgr->max_payloads; /* do nothing */) { + if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { + i++; continue; + } DRM_DEBUG_KMS("removing payload %d\n", i); for (j = i; j < mgr->max_payloads - 1; j++) { @@ -3143,6 +3534,23 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); +static void +drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + /* The link address will need to be re-sent on resume */ + mstb->link_address_sent = false; + + list_for_each_entry(port, &mstb->ports, next) { + /* The PBN for each port will also need to be re-probed */ + port->available_pbn = 0; + + if (port->mstb) + drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); + } +} + /** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend @@ -3156,62 +3564,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->up_req_work); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + flush_work(&mgr->delayed_destroy_work); + + mutex_lock(&mgr->lock); + if (mgr->mst_state && mgr->mst_primary) + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + mutex_unlock(&mgr->lock); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); /** * drm_dp_mst_topology_mgr_resume() - resume the MST manager * @mgr: manager to resume + * @sync: whether or not to perform topology reprobing synchronously * * This will fetch DPCD and see if the device is still 
there, * if it is, it will rewrite the MSTM control bits, and return. * - * if the device fails this returns -1, and the driver should do + * If the device fails this returns -1, and the driver should do * a full MST reprobe, in case we were undocked. + * + * During system resume (where it is assumed that the driver will be calling + * drm_atomic_helper_resume()) this function should be called beforehand with + * @sync set to true. In contexts like runtime resume where the driver is not + * expected to be calling drm_atomic_helper_resume(), this function should be + * called with @sync set to false in order to avoid deadlocking. + * + * Returns: -1 if the MST topology was removed while we were suspended, 0 + * otherwise. */ -int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync) { - int ret = 0; + int ret; + u8 guid[16]; mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; - if (mgr->mst_primary) { - int sret; - u8 guid[16]; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } - sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (sret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + goto out_fail; + } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); - if (ret < 0) { - DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + drm_dp_check_mstb_guid(mgr->mst_primary, guid); - /* Some hubs forget their guids after they resume */ - sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (sret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } - drm_dp_check_mstb_guid(mgr->mst_primary, guid); + /* + * For the final step of resuming the topology, we need to bring the + * state of our in-memory topology back into sync with reality. 
So, + * restart the probing process as if we're probing a new hub + */ + queue_work(system_long_wq, &mgr->work); + mutex_unlock(&mgr->lock); - ret = 0; - } else - ret = -1; + if (sync) { + DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); + flush_work(&mgr->work); + } -out_unlock: + return 0; + +out_fail: mutex_unlock(&mgr->lock); - return ret; + return -1; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); @@ -3328,12 +3763,78 @@ clear_down_rep_recv: return 0; } +static inline bool +drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_pending_up_req *up_req) +{ + struct drm_dp_mst_branch *mstb = NULL; + struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; + struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; + bool hotplug = false; + + if (hdr->broadcast) { + const u8 *guid = NULL; + + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) + guid = msg->u.conn_stat.guid; + else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) + guid = msg->u.resource_stat.guid; + + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); + } else { + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); + } + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr->lct); + return false; + } + + /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } + + drm_dp_mst_topology_put_mstb(mstb); + return hotplug; +} + +static void drm_dp_mst_up_req_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + up_req_work); + struct drm_dp_pending_up_req *up_req; + bool send_hotplug = false; + + mutex_lock(&mgr->probe_lock); + while (true) { + mutex_lock(&mgr->up_req_lock); + up_req = list_first_entry_or_null(&mgr->up_req_list, + struct drm_dp_pending_up_req, + next); + if (up_req) + list_del(&up_req->next); + mutex_unlock(&mgr->up_req_lock); + + if (!up_req) + break; + + send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); + kfree(up_req); + } + mutex_unlock(&mgr->probe_lock); + + if (send_hotplug) + drm_kms_helper_hotplug_event(mgr->dev); +} + static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { - struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; - struct drm_dp_mst_branch *mstb = NULL; - const u8 *guid; + struct drm_dp_pending_up_req *up_req; bool seqno; if (!drm_dp_get_one_sb_msg(mgr, true)) @@ -3342,56 +3843,53 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (!mgr->up_req_recv.have_eomt) return 0; - if (!hdr->broadcast) { - mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } + up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); + if (!up_req) { + DRM_ERROR("Not enough memory to process MST up req\n"); + return -ENOMEM; } + INIT_LIST_HEAD(&up_req->next); seqno = hdr->seqno; - drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) - guid = msg.u.conn_stat.guid; - else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) - guid = msg.u.resource_stat.guid; - else + if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && + up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { + DRM_DEBUG_KMS("Received 
unknown up req type, ignoring: %x\n", + up_req->msg.req_type); + kfree(up_req); goto out; - - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, - false); - - if (!mstb) { - mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } } - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + seqno, false); + + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + const struct drm_dp_connection_status_notify *conn_stat = + &up_req->msg.u.conn_stat; DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", - msg.u.conn_stat.port_number, - msg.u.conn_stat.legacy_device_plug_status, - msg.u.conn_stat.displayport_device_plug_status, - msg.u.conn_stat.message_capability_status, - msg.u.conn_stat.input_port, - msg.u.conn_stat.peer_device_type); + conn_stat->port_number, + conn_stat->legacy_device_plug_status, + conn_stat->displayport_device_plug_status, + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); + } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + const struct drm_dp_resource_status_notify *res_stat = + &up_req->msg.u.resource_stat; - drm_kms_helper_hotplug_event(mgr->dev); - } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", - msg.u.resource_stat.port_number, - msg.u.resource_stat.available_pbn); + res_stat->port_number, + res_stat->available_pbn); } - drm_dp_mst_topology_put_mstb(mstb); + up_req->hdr = *hdr; + mutex_lock(&mgr->up_req_lock); + list_add_tail(&up_req->next, &mgr->up_req_list); + mutex_unlock(&mgr->up_req_lock); + queue_work(system_long_wq, &mgr->up_req_work); + out: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -3438,22 +3936,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); /** * drm_dp_mst_detect_port() - get connection status for an MST port * @connector: DRM connector for this port + * @ctx: The acquisition context to use for grabbing locks * @mgr: manager for this port - * @port: unverified pointer to a port + * @port: pointer to a port * - * This returns the current connection state for a port. It validates the - * port pointer still exists so the caller doesn't require a reference + * This returns the current connection state for a port. 
*/ -enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, - struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { - enum drm_connector_status status = connector_status_disconnected; + int ret; /* we need to search for the port in the mgr in case it's gone */ port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return connector_status_disconnected; + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + goto out; + + ret = connector_status_disconnected; + if (!port->ddps) goto out; @@ -3463,7 +3970,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_SST_SINK: - status = connector_status_connected; + ret = connector_status_connected; /* for logical ports - cache the EDID */ if (port->port_num >= 8 && !port->cached_edid) { port->cached_edid = drm_get_edid(connector, &port->aux.ddc); @@ -3471,12 +3978,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) - status = connector_status_connected; + ret = connector_status_connected; break; } out: drm_dp_mst_topology_put_port(port); - return status; + return ret; } EXPORT_SYMBOL(drm_dp_mst_detect_port); @@ -4066,34 +4573,103 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } -static void drm_dp_destroy_connector_work(struct work_struct *work) +static inline void +drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); - struct drm_dp_mst_port *port; - bool send_hotplug = false; + if (port->connector) + port->mgr->cbs->destroy_connector(port->mgr, port->connector); + + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_mst_put_port_malloc(port); +} + +static inline void +drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + } + mutex_unlock(&mgr->lock); + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up_all(&mstb->mgr->tx_waitq); + + drm_dp_mst_put_mstb_malloc(mstb); +} + +static void drm_dp_delayed_destroy_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + delayed_destroy_work); + bool send_hotplug = false, go_again; + /* * Not a regular list traverse as we have to drop the destroy - * connector lock before destroying the connector, to avoid AB->BA + * connector lock before destroying the mstb/port, to avoid AB->BA * ordering between this lock and the config mutex. 
*/ - for (;;) { - mutex_lock(&mgr->destroy_connector_lock); - port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); - if (!port) { - mutex_unlock(&mgr->destroy_connector_lock); - break; + do { + go_again = false; + + for (;;) { + struct drm_dp_mst_branch *mstb; + + mutex_lock(&mgr->delayed_destroy_lock); + mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, + struct drm_dp_mst_branch, + destroy_next); + if (mstb) + list_del(&mstb->destroy_next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!mstb) + break; + + drm_dp_delayed_destroy_mstb(mstb); + go_again = true; } - list_del(&port->next); - mutex_unlock(&mgr->destroy_connector_lock); - mgr->cbs->destroy_connector(mgr, port->connector); + for (;;) { + struct drm_dp_mst_port *port; - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + mutex_lock(&mgr->delayed_destroy_lock); + port = list_first_entry_or_null(&mgr->destroy_port_list, + struct drm_dp_mst_port, + next); + if (port) + list_del(&port->next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!port) + break; + + drm_dp_delayed_destroy_port(port); + send_hotplug = true; + go_again = true; + } + } while (go_again); - drm_dp_mst_put_port_malloc(port); - send_hotplug = true; - } if (send_hotplug) drm_kms_helper_hotplug_event(mgr->dev); } @@ -4280,12 +4856,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->lock); mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); - mutex_init(&mgr->destroy_connector_lock); + mutex_init(&mgr->delayed_destroy_lock); + mutex_init(&mgr->up_req_lock); + mutex_init(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_init(&mgr->topology_ref_history_lock); +#endif INIT_LIST_HEAD(&mgr->tx_msg_downq); - INIT_LIST_HEAD(&mgr->destroy_connector_list); + INIT_LIST_HEAD(&mgr->destroy_port_list); + INIT_LIST_HEAD(&mgr->destroy_branch_device_list); + INIT_LIST_HEAD(&mgr->up_req_list); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->tx_work, drm_dp_tx_work); - INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); + INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); + INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); init_waitqueue_head(&mgr->tx_waitq); mgr->dev = dev; mgr->aux = aux; @@ -4326,7 +4910,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { drm_dp_mst_topology_mgr_set_mst(mgr, false); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + cancel_work_sync(&mgr->delayed_destroy_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); mgr->payloads = NULL; @@ -4338,10 +4922,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) drm_atomic_private_obj_fini(&mgr->base); mgr->funcs = NULL; - mutex_destroy(&mgr->destroy_connector_lock); + mutex_destroy(&mgr->delayed_destroy_lock); mutex_destroy(&mgr->payload_lock); mutex_destroy(&mgr->qlock); mutex_destroy(&mgr->lock); + mutex_destroy(&mgr->up_req_lock); + mutex_destroy(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_destroy(&mgr->topology_ref_history_lock); +#endif } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 8b5a06013ccc..92d16724f949 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -31,7 +31,9 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/anon_inodes.h> #include <linux/dma-fence.h> +#include <linux/file.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/poll.h> @@ -754,3 +756,43 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) spin_unlock_irqrestore(&dev->event_lock, irqflags); } EXPORT_SYMBOL(drm_send_event); + +/** + * mock_drm_getfile - Create a new struct file for the drm device + * @minor: drm minor to wrap (e.g. #drm_device.primary) + * @flags: file creation mode (O_RDWR etc) + * + * This creates a new struct file that wraps a DRM file context around a + * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without + * invoking userspace. The struct file may be operated on using its f_op + * (the drm_device.driver.fops) to mimic userspace operations, or be supplied + * to userspace-facing functions as an internal/anonymous client. + * + * RETURNS: + * Pointer to newly created struct file, ERR_PTR on failure. + */ +struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags) +{ + struct drm_device *dev = minor->dev; + struct drm_file *priv; + struct file *file; + + priv = drm_file_alloc(minor); + if (IS_ERR(priv)) + return ERR_CAST(priv); + + file = anon_inode_getfile("drm", dev->driver->fops, priv, flags); + if (IS_ERR(file)) { + drm_file_free(priv); + return file; + } + + /* Everyone shares a single global address space */ + file->f_mapping = dev->anon_inode->i_mapping; + + drm_dev_get(dev); + priv->filp = file; + + return file; +} +EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 56f42e0f2584..a9e4a610445a 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1105,18 +1105,30 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, if (obj_size < vma->vm_end - vma->vm_start) return -EINVAL; + /* Take a ref for this mapping of the object, so that the fault + * handler can dereference the mmap offset's pointer to the object. + * This reference is cleaned up by the corresponding vm_close + * (which should happen whether the vma was created by this call, or + * by a vm_open due to mremap or partial unmap or whatever).
- */ - drm_gem_object_get(obj); - return 0; } EXPORT_SYMBOL(drm_gem_mmap_obj); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 7412bfc5c05a..605a8a3da7f9 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -64,8 +64,19 @@ int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + int ret; - return ttm_bo_mmap_obj(vma, bo); + ret = ttm_bo_mmap_obj(vma, bo); + if (ret < 0) + return ret; + + /* + * ttm has its own object refcounting, so drop the gem reference + * to avoid double counting. + */ + drm_gem_object_put_unlocked(gem); + + return 0; } EXPORT_SYMBOL(drm_gem_ttm_mmap); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 0ca58803ba46..b50b44e76279 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -274,3 +274,119 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, return ret; } EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge); + +enum drm_of_lvds_pixels { + DRM_OF_LVDS_EVEN = BIT(0), + DRM_OF_LVDS_ODD = BIT(1), +}; + +static int drm_of_lvds_get_port_pixels_type(struct device_node *port_node) +{ + bool even_pixels = + of_property_read_bool(port_node, "dual-lvds-even-pixels"); + bool odd_pixels = + of_property_read_bool(port_node, "dual-lvds-odd-pixels"); + + return (even_pixels ? DRM_OF_LVDS_EVEN : 0) | + (odd_pixels ? DRM_OF_LVDS_ODD : 0); +} + +static int drm_of_lvds_get_remote_pixels_type( + const struct device_node *port_node) +{ + struct device_node *endpoint = NULL; + int pixels_type = -EPIPE; + + for_each_child_of_node(port_node, endpoint) { + struct device_node *remote_port; + int current_pt; + + if (!of_node_name_eq(endpoint, "endpoint")) + continue; + + remote_port = of_graph_get_remote_port(endpoint); + if (!remote_port) { + of_node_put(endpoint); + return -EPIPE; + } + + current_pt = drm_of_lvds_get_port_pixels_type(remote_port); + of_node_put(remote_port); + if (pixels_type < 0) + pixels_type = current_pt; + + /* + * Sanity check: ensure that all remote endpoints have the same + * pixel type. We may lift this restriction later if we need to + * support multiple sinks with different dual-link + * configurations by passing the endpoints explicitly to + * drm_of_lvds_get_dual_link_pixel_order(). + */ + if (!current_pt || pixels_type != current_pt) { + of_node_put(endpoint); + return -EINVAL; + } + } + + return pixels_type; +} + +/** + * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order + * @port1: First DT port node of the Dual-link LVDS source + * @port2: Second DT port node of the Dual-link LVDS source + * + * An LVDS dual-link connection is made of two links, with even pixels + * transmitted on one link, and odd pixels on the other link. This function + * returns, for two ports of an LVDS dual-link source, which port shall transmit + * the even and odd pixels, based on the requirements of the connected sink. + * + * The pixel order is determined from the dual-lvds-even-pixels and + * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those + * properties are not present, or if their usage is not valid, this function + * returns -EINVAL. + * + * If either port is not connected, this function returns -EPIPE. + * + * @port1 and @port2 are typically DT sibling nodes, but may have different + * parents when, for instance, two separate LVDS encoders carry the even and odd + * pixels.
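+ * + * As a purely illustrative sketch (a hypothetical devicetree fragment, not a binding example from this patch), a dual-link sink would typically mark its two port nodes with the properties named above: + * + * ports { + * port@0 { dual-lvds-even-pixels; endpoint { ... }; }; + * port@1 { dual-lvds-odd-pixels; endpoint { ... }; }; + * };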
+ * + * Return: + * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 carries even pixels and @port2 + * carries odd pixels + * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 carries odd pixels and @port2 + * carries even pixels + * * -EINVAL - @port1 and @port2 are not connected to a dual-link LVDS sink, or + * the sink configuration is invalid + * * -EPIPE - when @port1 or @port2 are not connected + */ +int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, + const struct device_node *port2) +{ + int remote_p1_pt, remote_p2_pt; + + if (!port1 || !port2) + return -EINVAL; + + remote_p1_pt = drm_of_lvds_get_remote_pixels_type(port1); + if (remote_p1_pt < 0) + return remote_p1_pt; + + remote_p2_pt = drm_of_lvds_get_remote_pixels_type(port2); + if (remote_p2_pt < 0) + return remote_p2_pt; + + /* + * A valid dual-LVDS bus is found when one remote port is marked with + * "dual-lvds-even-pixels", and the other remote port is marked with + * "dual-lvds-odd-pixels"; bail out if the markers are not right. + */ + if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD) + return -EINVAL; + + return remote_p1_pt == DRM_OF_LVDS_EVEN ? + DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS : + DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; +} +EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 892ce636ef72..6ee04803c362 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -561,7 +561,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) + if (!length || length > INT_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c index 68f4765a5896..dd33fec5aabd 100644 --- a/drivers/gpu/drm/drm_self_refresh_helper.c +++ b/drivers/gpu/drm/drm_self_refresh_helper.c @@ -133,29 +133,33 @@ out_drop_locks: * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages * @state: the state which has just been applied to hardware * @commit_time_ms: the amount of time in ms that this commit took to complete + * @new_self_refresh_mask: bitmask of crtcs that have self_refresh_active in + * new state * * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will * update the average entry/exit self refresh times on self refresh transitions. * These averages will be used when calculating how long to delay before * entering self refresh mode after activity.
*/ -void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, - unsigned int commit_time_ms) +void +drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, + unsigned int commit_time_ms, + unsigned int new_self_refresh_mask) { struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc_state *old_crtc_state; int i; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + bool new_self_refresh_active = new_self_refresh_mask & BIT(i); struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; struct ewma_psr_time *time; if (old_crtc_state->self_refresh_active == - new_crtc_state->self_refresh_active) + new_self_refresh_active) continue; - if (new_crtc_state->self_refresh_active) + if (new_self_refresh_active) time = &sr_data->entry_avg_ms; else time = &sr_data->exit_avg_ms; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 698db540972c..648cf0207309 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit) etnaviv_cmdbuf_get_va(&submit->cmdbuf, &gpu->mmu_context->cmdbuf_mapping)); + mutex_unlock(&gpu->mmu_context->lock); + /* Reserve space for the bomap */ if (n_bomap_pages) { bomap_start = bomap = iter.data; @@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit) obj->base.size); } - mutex_unlock(&gpu->mmu_context->lock); - etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index 043111a1d60c..f8bf488e9d71 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *bu memcpy(buf, v2_context->mtlb_cpu, SZ_4K); buf += SZ_4K; - for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K) - if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) + for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) + if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) { memcpy(buf, v2_context->stlb_cpu[i], SZ_4K); + buf += SZ_4K; + } } static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 35ebae6a1be7..3607d348c298 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global, ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping, global->memory_base); - if (ret) { - global->ops->free(ctx); - return NULL; + if (ret) + goto out_free; + + if (global->version == ETNAVIV_IOMMU_V1 && + ctx->cmdbuf_mapping.iova > 0x80000000) { + dev_err(global->dev, + "command buffer outside valid memory window\n"); + goto out_unmap; } return ctx; + +out_unmap: + etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping); +out_free: + global->ops->free(ctx); + return NULL; } void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 7b24338fad3c..6cfdb95fef2f 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ 
b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc, struct mixer_context *ctx = crtc->ctx; int width = mode->hdisplay, height = mode->vdisplay, i; - struct { + static const struct { int hdisplay, vdisplay, htotal, vtotal, scan_val; - } static const modes[] = { + } modes[] = { { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD }, { 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD }, { 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD }, diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 3c6d57df262d..ba9595960bbe 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -148,3 +148,9 @@ menu "drm/i915 Profile Guided Optimisation" depends on DRM_I915 source "drivers/gpu/drm/i915/Kconfig.profile" endmenu + +menu "drm/i915 Unstable Evolution" + visible if EXPERT && STAGING && BROKEN + depends on DRM_I915 + source "drivers/gpu/drm/i915/Kconfig.unstable" +endmenu diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index eea79125b3ea..1cb28c20807c 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -7,7 +7,6 @@ config DRM_I915_WERROR # We use the dependency on !COMPILE_TEST to not be enabled in # allmodconfig or allyesconfig configurations depends on !COMPILE_TEST - select HEADER_TEST default n help Add -Werror to the build flags for (and only for) i915.ko. @@ -22,13 +21,13 @@ config DRM_I915_DEBUG depends on DRM_I915 select DEBUG_FS select PREEMPT_COUNT - select REFCOUNT_FULL select I2C_CHARDEV select STACKDEPOT select DRM_DP_AUX_CHARDEV select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y + select DRM_EXPORT_FOR_TESTS if m select DRM_DEBUG_SELFTEST select DMABUF_SELFTESTS select SW_SYNC # signaling validation framework (igt/syncobj*) @@ -151,6 +150,7 @@ config DRM_I915_SELFTEST bool "Enable selftests upon driver load" depends on DRM_I915 default n + select DRM_EXPORT_FOR_TESTS if m select FAULT_INJECTION select PRIME_NUMBERS help diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile index 48df8889a88a..c280b6ae38eb 100644 --- a/drivers/gpu/drm/i915/Kconfig.profile +++ b/drivers/gpu/drm/i915/Kconfig.profile @@ -12,6 +12,29 @@ config DRM_I915_USERFAULT_AUTOSUSPEND May be 0 to disable the extra delay and solely use the device level runtime pm autosuspend delay tunable. +config DRM_I915_HEARTBEAT_INTERVAL + int "Interval between heartbeat pulses (ms)" + default 2500 # milliseconds + help + The driver sends a periodic heartbeat down all active engines to + check the health of the GPU and undertake regular house-keeping of + internal driver state. + + May be 0 to disable heartbeats and therefore disable automatic GPU + hang detection. + +config DRM_I915_PREEMPT_TIMEOUT + int "Preempt timeout (ms, jiffy granularity)" + default 640 # milliseconds + help + How long to wait (in milliseconds) for a preemption event to occur + when submitting a new context via execlists. If the current context + does not hit an arbitration point and yield to HW before the timer + expires, the HW will be reset to allow the more important context + to execute. + + May be 0 to disable the timeout. + config DRM_I915_SPIN_REQUEST int "Busywait for request completion (us)" default 5 # microseconds @@ -25,3 +48,29 @@ config DRM_I915_SPIN_REQUEST May be 0 to disable the initial spin. 
In practice, we estimate the cost of enabling the interrupt (if currently disabled) to be a few microseconds. + +config DRM_I915_STOP_TIMEOUT + int "How long to wait for an engine to quiesce gracefully before reset (ms)" + default 100 # milliseconds + help + By stopping submission and sleeping for a short time before resetting + the GPU, we allow the innocent contexts also running on the system to + quiesce. It is then less likely for a hanging context to cause + collateral damage as the system is reset in order to recover. The + corollary is that the reset itself may take longer and so be more + disruptive to interactive or low-latency workloads. + +config DRM_I915_TIMESLICE_DURATION + int "Scheduling quantum for userspace batches (ms, jiffy granularity)" + default 1 # milliseconds + help + When two user batches of equal priority are executing, we will + alternate execution of each batch to ensure forward progress of + all users. This is necessary in some cases where there may be + an implicit dependency between those batches that requires + concurrent execution in order for them to proceed, e.g. they + interact with each other via userspace semaphores. Each context + is scheduled for execution for the timeslice duration, before + switching to the next context. + + May be 0 to disable timeslicing. diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable new file mode 100644 index 000000000000..0c2276155c2b --- /dev/null +++ b/drivers/gpu/drm/i915/Kconfig.unstable @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_I915_UNSTABLE + bool "Enable unstable API for early prototype development" + depends on EXPERT + depends on STAGING + depends on BROKEN # should never be enabled by distros! + # We use the dependency on !COMPILE_TEST to not be enabled in + # allmodconfig or allyesconfig configurations + depends on !COMPILE_TEST + default n + help + Enable prototype uAPIs that are under general discussion before they + are finalized. Such prototypes may be withdrawn or substantially + changed before release. They are only enabled here so that a wide + number of interested parties (userspace driver developers) can + verify that the uAPIs meet their expectations. These uAPIs should + never be used in production. + + Recommended for driver developers _only_. + + If in the slightest bit of doubt, say "N". + +config DRM_I915_UNSTABLE_FAKE_LMEM + bool "Enable the experimental fake lmem" + depends on DRM_I915_UNSTABLE + default n + help + Convert some system memory into a fake local memory region for + testing.
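For reference only (an editorial illustration, not part of the patch): a .config fragment exercising the new and existing profile knobs at the defaults documented in the help texts above would read:

    CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
    CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
    CONFIG_DRM_I915_SPIN_REQUEST=5
    CONFIG_DRM_I915_STOP_TIMEOUT=100
    CONFIG_DRM_I915_TIMESLICE_DURATION=1

As each help text notes, setting the interval/timeout options to 0 disables the corresponding mechanism.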
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a16a2daef977..b0c53661f62b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -75,25 +75,30 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o # "Graphics Technology" (aka we talk to the gpu) obj-y += gt/ gt-y += \ + gt/debugfs_engines.o \ + gt/debugfs_gt.o \ + gt/debugfs_gt_pm.o \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ gt/intel_engine_cs.o \ - gt/intel_engine_pool.o \ + gt/intel_engine_heartbeat.o \ gt/intel_engine_pm.o \ + gt/intel_engine_pool.o \ gt/intel_engine_user.o \ gt/intel_gt.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ gt/intel_gt_pm_irq.o \ gt/intel_gt_requests.o \ - gt/intel_hangcheck.o \ gt/intel_llc.o \ gt/intel_lrc.o \ + gt/intel_mocs.o \ gt/intel_rc6.o \ gt/intel_renderstate.o \ gt/intel_reset.o \ - gt/intel_ringbuffer.o \ - gt/intel_mocs.o \ + gt/intel_ring.o \ + gt/intel_ring_submission.o \ + gt/intel_rps.o \ gt/intel_sseu.o \ gt/intel_timeline.o \ gt/intel_workarounds.o @@ -119,6 +124,7 @@ gem-y += \ gem/i915_gem_internal.o \ gem/i915_gem_object.o \ gem/i915_gem_object_blt.o \ + gem/i915_gem_lmem.o \ gem/i915_gem_mman.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ @@ -147,6 +153,7 @@ i915-y += \ i915_scheduler.o \ i915_trace_points.o \ i915_vma.o \ + intel_region_lmem.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support @@ -243,7 +250,8 @@ i915-y += \ oa/i915_oa_cflgt2.o \ oa/i915_oa_cflgt3.o \ oa/i915_oa_cnl.o \ - oa/i915_oa_icl.o + oa/i915_oa_icl.o \ + oa/i915_oa_tgl.o i915-y += i915_perf.o # Post-mortem debug and GPU hang state capture @@ -254,6 +262,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/i915_selftest.o \ selftests/igt_flush_test.o \ selftests/igt_live_test.o \ + selftests/igt_mmap.o \ selftests/igt_reset.o \ selftests/igt_spinner.o diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 6e398c33a524..006b1a297e6f 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -34,6 +34,7 @@ #include "intel_ddi.h" #include "intel_dsi.h" #include "intel_panel.h" +#include "intel_vdsc.h" static inline int header_credits_available(struct drm_i915_private *dev_priv, enum transcoder dsi_trans) @@ -276,7 +277,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { const struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; u32 dss_ctl2; u16 hactive = adjusted_mode->crtc_hdisplay; u16 dl_buffer_depth; @@ -301,18 +302,31 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, I915_WRITE(DSS_CTL1, dss_ctl1); } -static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) +/* aka DSI 8X clock */ +static int afe_clk(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + int bpp; + + if (crtc_state->dsc.compression_enable) + bpp = crtc_state->dsc.compressed_bpp; + else + bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + + return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count); +} + +static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; - u32 bpp = 
mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); - u32 afe_clk_khz; /* 8X Clock */ + int afe_clk_khz; u32 esc_clk_div_m; - afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, - intel_dsi->lane_count); - + afe_clk_khz = afe_clk(encoder, crtc_state); esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); for_each_dsi_port(port, intel_dsi->ports) { @@ -490,7 +504,9 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) } } -static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) +static void +gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -531,7 +547,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) * leave all fields at HW default values. */ if (IS_GEN(dev_priv, 11)) { - if (intel_dsi_bitrate(intel_dsi) <= 800000) { + if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { tmp = I915_READ(DPHY_TA_TIMING_PARAM(port)); tmp &= ~TA_SURE_MASK; @@ -625,7 +641,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum pipe pipe = intel_crtc->pipe; u32 tmp; enum port port; @@ -641,7 +657,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, tmp |= EOTP_DISABLED; /* enable link calibration if freq > 1.5Gbps */ - if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) { + if (afe_clk(encoder, pipe_config) >= 1500 * 1000) { tmp &= ~LINK_CALIBRATION_MASK; tmp |= CALIBRATION_ENABLED_INITIAL_ONLY; } @@ -667,22 +683,26 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* select pixel format */ tmp &= ~PIX_FMT_MASK; - switch (intel_dsi->pixel_format) { - default: - MISSING_CASE(intel_dsi->pixel_format); - /* fallthrough */ - case MIPI_DSI_FMT_RGB565: - tmp |= PIX_FMT_RGB565; - break; - case MIPI_DSI_FMT_RGB666_PACKED: - tmp |= PIX_FMT_RGB666_PACKED; - break; - case MIPI_DSI_FMT_RGB666: - tmp |= PIX_FMT_RGB666_LOOSE; - break; - case MIPI_DSI_FMT_RGB888: - tmp |= PIX_FMT_RGB888; - break; + if (pipe_config->dsc.compression_enable) { + tmp |= PIX_FMT_COMPRESSED; + } else { + switch (intel_dsi->pixel_format) { + default: + MISSING_CASE(intel_dsi->pixel_format); + /* fallthrough */ + case MIPI_DSI_FMT_RGB565: + tmp |= PIX_FMT_RGB565; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + tmp |= PIX_FMT_RGB666_PACKED; + break; + case MIPI_DSI_FMT_RGB666: + tmp |= PIX_FMT_RGB666_LOOSE; + break; + case MIPI_DSI_FMT_RGB888: + tmp |= PIX_FMT_RGB888; + break; + } } if (INTEL_GEN(dev_priv) >= 12) { @@ -745,6 +765,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, case PIPE_C: tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF; break; + case PIPE_D: + tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF; + break; } /* enable DDI buffer */ @@ -763,12 +786,12 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, static void gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); const struct drm_display_mode 
*adjusted_mode = - &pipe_config->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; enum port port; enum transcoder dsi_trans; /* horizontal timings */ @@ -776,11 +799,25 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, u16 hback_porch; /* vertical timings */ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; + int mul = 1, div = 1; + + /* + * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account + * for slower link speed if DSC is enabled. + * + * The compression frequency ratio is the ratio between compressed and + * non-compressed link speeds, and simplifies down to the ratio between + * compressed and non-compressed bpp. + */ + if (crtc_state->dsc.compression_enable) { + mul = crtc_state->dsc.compressed_bpp; + div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + } hactive = adjusted_mode->crtc_hdisplay; - htotal = adjusted_mode->crtc_htotal; - hsync_start = adjusted_mode->crtc_hsync_start; - hsync_end = adjusted_mode->crtc_hsync_end; + htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div); + hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div); + hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div); hsync_size = hsync_end - hsync_start; hback_porch = (adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end); @@ -904,7 +941,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) } } -static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder) +static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -919,7 +957,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder) * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS */ - divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000; + divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000; mul = 8 * 1000000; hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul, divisor); @@ -955,7 +993,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder) static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -972,13 +1010,13 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, gen11_dsi_enable_ddi_buffer(encoder); /* setup D-PHY timings */ - gen11_dsi_setup_dphy_timings(encoder); + gen11_dsi_setup_dphy_timings(encoder, crtc_state); /* step 4h: setup DSI protocol timeouts */ - gen11_dsi_setup_timeouts(encoder); + gen11_dsi_setup_timeouts(encoder, crtc_state); /* Step (4h, 4i, 4j, 4k): Configure transcoder */ - gen11_dsi_configure_transcoder(encoder, pipe_config); + gen11_dsi_configure_transcoder(encoder, crtc_state); /* Step 4l: Gate DDI clocks */ if (IS_GEN(dev_priv, 11)) @@ -1025,14 +1063,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) } static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, + const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { /* step2: enable IO power */ gen11_dsi_enable_io_power(encoder); /* step3: enable DSI PLL */ - gen11_dsi_program_esc_clk_div(encoder); + gen11_dsi_program_esc_clk_div(encoder, 
crtc_state); } static void gen11_dsi_pre_enable(struct intel_encoder *encoder, @@ -1050,6 +1088,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, /* step5: program and powerup panel */ gen11_dsi_powerup_panel(encoder); + intel_dsc_enable(encoder, pipe_config); + /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); @@ -1211,12 +1251,42 @@ static void gen11_dsi_disable(struct intel_encoder *encoder, gen11_dsi_disable_io_power(encoder); } +static void gen11_dsi_post_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_crtc_vblank_off(old_crtc_state); + + intel_dsc_disable(old_crtc_state); + + skylake_scaler_disable(old_crtc_state); +} + +static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* FIXME: DSC? */ + return intel_dsi_mode_valid(connector, mode); +} + static void gen11_dsi_get_timings(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; + + if (pipe_config->dsc.compressed_bpp) { + int div = pipe_config->dsc.compressed_bpp; + int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + + adjusted_mode->crtc_htotal = + DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div); + adjusted_mode->crtc_hsync_start = + DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div); + adjusted_mode->crtc_hsync_end = + DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div); + } if (intel_dsi->dual_link) { adjusted_mode->crtc_hdisplay *= 2; @@ -1242,22 +1312,66 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + intel_dsc_get_config(encoder, pipe_config); + /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state); - pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk; + pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk; if (intel_dsi->dual_link) - pipe_config->base.adjusted_mode.crtc_clock *= 2; + pipe_config->hw.adjusted_mode.crtc_clock *= 2; gen11_dsi_get_timings(encoder, pipe_config); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); } +static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 
12 : 10; + bool use_dsc; + int ret; + + use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc); + if (!use_dsc) + return 0; + + if (crtc_state->pipe_bpp < 8 * 3) + return -EINVAL; + + /* FIXME: split only when necessary */ + if (crtc_state->dsc.slice_count > 1) + crtc_state->dsc.dsc_split = true; + + vdsc_cfg->convert_rgb = true; + + ret = intel_dsc_compute_params(encoder, crtc_state); + if (ret) + return ret; + + /* DSI specific sanity checks on the common code */ + WARN_ON(vdsc_cfg->vbr_enable); + WARN_ON(vdsc_cfg->simple_422); + WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width); + WARN_ON(vdsc_cfg->slice_height < 8); + WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height); + + ret = drm_dsc_compute_rc_parameters(vdsc_cfg); + if (ret) + return ret; + + crtc_state->dsc.compression_enable = true; + + return 0; +} + static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -1265,11 +1379,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; intel_fixed_panel_mode(fixed_mode, adjusted_mode); @@ -1283,8 +1397,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, else pipe_config->cpu_transcoder = TRANSCODER_DSI_0; + if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888) + pipe_config->pipe_bpp = 24; + else + pipe_config->pipe_bpp = 18; + pipe_config->clock_set = true; - pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5; + + if (gen11_dsi_dsc_compute_config(encoder, pipe_config)) + DRM_DEBUG_KMS("Attempting to use DSC failed\n"); + + pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; return 0; } @@ -1292,8 +1415,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - get_dsi_io_power_domains(to_i915(encoder->base.dev), - enc_to_intel_dsi(&encoder->base)); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + get_dsi_io_power_domains(i915, enc_to_intel_dsi(&encoder->base)); + + if (crtc_state->dsc.compression_enable) + intel_display_power_get(i915, + intel_dsc_power_domain(crtc_state)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, @@ -1325,6 +1453,9 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, case TRANS_DDI_EDP_INPUT_C_ONOFF: *pipe = PIPE_C; break; + case TRANS_DDI_EDP_INPUT_D_ONOFF: + *pipe = PIPE_D; + break; default: DRM_ERROR("Invalid PIPE input\n"); goto out; @@ -1360,7 +1491,7 @@ static const struct drm_connector_funcs gen11_dsi_connector_funcs = { static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = { .get_modes = intel_dsi_get_modes, - .mode_valid = intel_dsi_mode_valid, + .mode_valid = gen11_dsi_mode_valid, .atomic_check = intel_digital_connector_atomic_check, }; @@ -1577,6 +1708,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; 
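/* Hook ordering on a modeset: pre_pll_enable -> pre_enable -> enable when enabling, disable -> post_disable when disabling; the post_disable hook added below is where vblank, DSC and the pipe scaler are now torn down (see gen11_dsi_post_disable above). */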
encoder->pre_enable = gen11_dsi_pre_enable; encoder->disable = gen11_dsi_disable; + encoder->post_disable = gen11_dsi_post_disable; encoder->port = port; encoder->get_config = gen11_dsi_get_config; encoder->update_pipe = intel_panel_update_backlight; @@ -1584,7 +1716,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->get_hw_state = gen11_dsi_get_hw_state; encoder->type = INTEL_OUTPUT_DSI; encoder->cloneable = 0; - encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + encoder->pipe_mask = ~0; encoder->power_domain = POWER_DOMAIN_PORT_DSI; encoder->get_power_domains = gen11_dsi_get_power_domains; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index c5a552a69752..fd0026fc3618 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -186,13 +186,22 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector) struct drm_crtc_state * intel_crtc_duplicate_state(struct drm_crtc *crtc) { + const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state); struct intel_crtc_state *crtc_state; - crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL); + crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL); if (!crtc_state) return NULL; - __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); + __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi); + + /* copy color blobs */ + if (crtc_state->hw.degamma_lut) + drm_property_blob_get(crtc_state->hw.degamma_lut); + if (crtc_state->hw.ctm) + drm_property_blob_get(crtc_state->hw.ctm); + if (crtc_state->hw.gamma_lut) + drm_property_blob_get(crtc_state->hw.gamma_lut); crtc_state->update_pipe = false; crtc_state->disable_lp_wm = false; @@ -200,11 +209,34 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_wm_pre = false; crtc_state->update_wm_post = false; crtc_state->fifo_changed = false; + crtc_state->preload_luts = false; crtc_state->wm.need_postvbl_update = false; crtc_state->fb_bits = 0; crtc_state->update_planes = 0; - return &crtc_state->base; + return &crtc_state->uapi; +} + +static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state) +{ + drm_property_blob_put(crtc_state->hw.degamma_lut); + drm_property_blob_put(crtc_state->hw.gamma_lut); + drm_property_blob_put(crtc_state->hw.ctm); +} + +void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state) +{ + intel_crtc_put_color_blobs(crtc_state); +} + +void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state) +{ + drm_property_replace_blob(&crtc_state->hw.degamma_lut, + crtc_state->uapi.degamma_lut); + drm_property_replace_blob(&crtc_state->hw.gamma_lut, + crtc_state->uapi.gamma_lut); + drm_property_replace_blob(&crtc_state->hw.ctm, + crtc_state->uapi.ctm); } /** @@ -219,7 +251,11 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { - drm_atomic_helper_crtc_destroy_state(crtc, state); + struct intel_crtc_state *crtc_state = to_intel_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); + intel_crtc_free_hw_state(crtc_state); + kfree(crtc_state); } static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, @@ -248,10 +284,10 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta return; /* set scaler mode */ - if (plane_state && plane_state->base.fb && - plane_state->base.fb->format->is_yuv && - 
plane_state->base.fb->format->num_planes > 1) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + if (plane_state && plane_state->hw.fb && + plane_state->hw.fb->format->is_yuv && + plane_state->hw.fb->format->num_planes > 1) { + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) { mode = SKL_PS_SCALER_MODE_NV12; @@ -318,7 +354,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_plane_state *plane_state = NULL; struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; - struct drm_atomic_state *drm_state = crtc_state->base.state; + struct drm_atomic_state *drm_state = crtc_state->uapi.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); int num_scalers_need; int i; @@ -429,6 +465,13 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) struct intel_atomic_state *state = to_intel_atomic_state(s); drm_atomic_state_default_clear(&state->base); state->dpll_set = state->modeset = false; + state->global_state_changed = false; + state->active_pipes = 0; + memset(&state->min_cdclk, 0, sizeof(state->min_cdclk)); + memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level)); + memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical)); + memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual)); + state->cdclk.pipe = INVALID_PIPE; } struct intel_crtc_state * @@ -442,3 +485,40 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } + +int intel_atomic_lock_global_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + state->global_state_changed = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + int ret; + + ret = drm_modeset_lock(&crtc->base.mutex, + state->base.acquire_ctx); + if (ret) + return ret; + } + + return 0; +} + +int intel_atomic_serialize_global_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + state->global_state_changed = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 58065d3161a3..7b49623419ba 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -16,6 +16,7 @@ struct drm_crtc_state; struct drm_device; struct drm_i915_private; struct drm_property; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -35,6 +36,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector); struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); void intel_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); +void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state); +void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state); struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); void intel_atomic_state_clear(struct drm_atomic_state *state); @@ -46,4 +49,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); +int 
intel_atomic_lock_global_state(struct intel_atomic_state *state); + +int intel_atomic_serialize_global_state(struct intel_atomic_state *state); + #endif /* __INTEL_ATOMIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index a6cff5a160fb..3e97af682b1b 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -41,6 +41,16 @@ #include "intel_pm.h" #include "intel_sprite.h" +static void intel_plane_state_reset(struct intel_plane_state *plane_state, + struct intel_plane *plane) +{ + memset(plane_state, 0, sizeof(*plane_state)); + + __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base); + + plane_state->scaler_id = -1; +} + struct intel_plane *intel_plane_alloc(void) { struct intel_plane_state *plane_state; @@ -56,8 +66,9 @@ struct intel_plane *intel_plane_alloc(void) return ERR_PTR(-ENOMEM); } - __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base); - plane_state->scaler_id = -1; + intel_plane_state_reset(plane_state, plane); + + plane->base.state = &plane_state->uapi; return plane; } @@ -80,22 +91,24 @@ void intel_plane_free(struct intel_plane *plane) struct drm_plane_state * intel_plane_duplicate_state(struct drm_plane *plane) { - struct drm_plane_state *state; struct intel_plane_state *intel_state; - intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL); + intel_state = to_intel_plane_state(plane->state); + intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL); if (!intel_state) return NULL; - state = &intel_state->base; - - __drm_atomic_helper_plane_duplicate_state(plane, state); + __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi); intel_state->vma = NULL; intel_state->flags = 0; - return state; + /* add reference to fb */ + if (intel_state->hw.fb) + drm_framebuffer_get(intel_state->hw.fb); + + return &intel_state->uapi; } /** @@ -110,18 +123,22 @@ void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { - WARN_ON(to_intel_plane_state(state)->vma); + struct intel_plane_state *plane_state = to_intel_plane_state(state); + WARN_ON(plane_state->vma); - drm_atomic_helper_plane_destroy_state(plane, state); + __drm_atomic_helper_plane_destroy_state(&plane_state->uapi); + if (plane_state->hw.fb) + drm_framebuffer_put(plane_state->hw.fb); + kfree(plane_state); } unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; cpp = fb->format->cpp[0]; @@ -138,22 +155,90 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, return cpp * crtc_state->pixel_rate; } +bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); + struct intel_crtc_state *crtc_state; + + if (!plane_state->uapi.visible || !plane->min_cdclk) + return false; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + crtc_state->min_cdclk[plane->id] = + plane->min_cdclk(crtc_state, plane_state); + + /* + * Does the cdclk 
need to be bumped up? + * + * Note: we obviously need to be called before the new + * cdclk frequency is calculated so state->cdclk.logical + * hasn't been populated yet. Hence we look at the old + * cdclk state under dev_priv->cdclk.logical. This is + * safe as long as we hold at least one crtc mutex (which + * must be true since we have crtc_state). + */ + if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) { + DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n", + plane->base.base.id, plane->base.name, + crtc_state->min_cdclk[plane->id], + dev_priv->cdclk.logical.cdclk); + return true; + } + + return false; +} + +static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state) +{ + if (plane_state->hw.fb) + drm_framebuffer_put(plane_state->hw.fb); + + memset(&plane_state->hw, 0, sizeof(plane_state->hw)); +} + +void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, + const struct intel_plane_state *from_plane_state) +{ + intel_plane_clear_hw_state(plane_state); + + plane_state->hw.crtc = from_plane_state->uapi.crtc; + plane_state->hw.fb = from_plane_state->uapi.fb; + if (plane_state->hw.fb) + drm_framebuffer_get(plane_state->hw.fb); + + plane_state->hw.alpha = from_plane_state->uapi.alpha; + plane_state->hw.pixel_blend_mode = + from_plane_state->uapi.pixel_blend_mode; + plane_state->hw.rotation = from_plane_state->uapi.rotation; + plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding; + plane_state->hw.color_range = from_plane_state->uapi.color_range; +} + int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *new_plane_state) { - struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane); - const struct drm_framebuffer *fb = new_plane_state->base.fb; + struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); + const struct drm_framebuffer *fb; int ret; + intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); + fb = new_plane_state->hw.fb; + new_crtc_state->active_planes &= ~BIT(plane->id); new_crtc_state->nv12_planes &= ~BIT(plane->id); new_crtc_state->c8_planes &= ~BIT(plane->id); new_crtc_state->data_rate[plane->id] = 0; - new_plane_state->base.visible = false; + new_crtc_state->min_cdclk[plane->id] = 0; + new_plane_state->uapi.visible = false; - if (!new_plane_state->base.crtc && !old_plane_state->base.crtc) + if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) return 0; ret = plane->check_plane(new_crtc_state, new_plane_state); @@ -161,18 +246,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ return ret; /* FIXME pre-g4x don't work like this */ - if (new_plane_state->base.visible) + if (new_plane_state->uapi.visible) new_crtc_state->active_planes |= BIT(plane->id); - if (new_plane_state->base.visible && - drm_format_info_is_yuv_semiplanar(fb->format)) + if (new_plane_state->uapi.visible && + intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) new_crtc_state->nv12_planes |= BIT(plane->id); - if (new_plane_state->base.visible && + if (new_plane_state->uapi.visible && fb->format->format == DRM_FORMAT_C8) new_crtc_state->c8_planes |= BIT(plane->id); - if (new_plane_state->base.visible || old_plane_state->base.visible) + if (new_plane_state->uapi.visible || old_plane_state->uapi.visible) new_crtc_state->update_planes |= BIT(plane->id);
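/* update_planes collects any plane that is or was visible, i.e. everything this commit must reprogram or disable; data_rate (next) and min_cdclk (reset earlier in this function) feed the bandwidth and cdclk computations. */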
new_crtc_state->data_rate[plane->id] = @@ -186,11 +271,11 @@ static struct intel_crtc * get_crtc_from_states(const struct intel_plane_state *old_plane_state, const struct intel_plane_state *new_plane_state) { - if (new_plane_state->base.crtc) - return to_intel_crtc(new_plane_state->base.crtc); + if (new_plane_state->uapi.crtc) + return to_intel_crtc(new_plane_state->uapi.crtc); - if (old_plane_state->base.crtc) - return to_intel_crtc(old_plane_state->base.crtc); + if (old_plane_state->uapi.crtc) + return to_intel_crtc(old_plane_state->uapi.crtc); return NULL; } @@ -207,7 +292,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state; struct intel_crtc_state *new_crtc_state; - new_plane_state->base.visible = false; + new_plane_state->uapi.visible = false; if (!crtc) return 0; @@ -268,26 +353,16 @@ void intel_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_update_plane(&plane->base, crtc); plane->update_plane(plane, crtc_state, plane_state); } -void intel_update_slave(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - - trace_intel_update_plane(&plane->base, crtc); - plane->update_slave(plane, crtc_state, plane_state); -} - void intel_disable_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_disable_plane(&plane->base, crtc); plane->disable_plane(plane, crtc_state); @@ -316,25 +391,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); - if (new_plane_state->base.visible) { + if (new_plane_state->uapi.visible || + new_plane_state->planar_slave) { intel_update_plane(plane, new_crtc_state, new_plane_state); - } else if (new_plane_state->planar_slave) { - struct intel_plane *master = - new_plane_state->planar_linked_plane; - - /* - * We update the slave plane from this function because - * programming it from the master plane's update_plane - * callback runs into issues when the Y plane is - * reassigned, disabled or used by a different plane. - * - * The slave plane is updated with the master plane's - * plane_state. 
- */ - new_plane_state = - intel_atomic_get_new_plane_state(state, master); - - intel_update_slave(plane, new_crtc_state, new_plane_state); } else { intel_disable_plane(plane, new_crtc_state); } @@ -356,7 +415,7 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - if (new_plane_state->base.visible) + if (new_plane_state->uapi.visible) intel_update_plane(plane, new_crtc_state, new_plane_state); else intel_disable_plane(plane, new_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index dc85af02e9b7..5cedafdddb55 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -20,12 +20,11 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); +void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, + const struct intel_plane_state *from_plane_state); void intel_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); -void intel_update_slave(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); void intel_disable_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); @@ -47,5 +46,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *plane_state); +bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane); #endif /* __INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index ed18511befa3..27710098d056 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -28,6 +28,7 @@ #include <drm/i915_component.h> #include "i915_drv.h" +#include "intel_atomic.h" #include "intel_audio.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" @@ -233,7 +234,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = { static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state) { const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; int i; for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { @@ -554,7 +555,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; enum port port = encoder->port; u32 tmp, eldv; @@ -601,7 +602,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; enum pipe pipe = crtc->pipe; enum port port = encoder->port; @@ -691,10 
+692,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct i915_audio_component *acomp = dev_priv->audio_component; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; enum port port = encoder->port; enum pipe pipe = crtc->pipe; @@ -752,7 +753,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct i915_audio_component *acomp = dev_priv->audio_component; - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; enum pipe pipe = crtc->pipe; @@ -818,13 +819,8 @@ retry: to_intel_atomic_state(state)->cdclk.force_min_cdclk = enable ? 2 * 96000 : 0; - /* - * Protects dev_priv->cdclk.force_min_cdclk - * Need to lock this here in case we have no active pipes - * and thus wouldn't lock it during the commit otherwise. - */ - ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, - &ctx); + /* Protects dev_priv->cdclk.force_min_cdclk */ + ret = intel_atomic_lock_global_state(to_intel_atomic_state(state)); if (!ret) ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 63c1bd4c2954..8beac06e3f10 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -29,6 +29,7 @@ #include <drm/i915_drm.h> #include "display/intel_display.h" +#include "display/intel_display_types.h" #include "display/intel_gmbus.h" #include "i915_drv.h" @@ -58,6 +59,13 @@ * that. 
*/ +/* Wrapper for VBT child device config */ +struct display_device_data { + struct child_device_config child; + struct dsc_compression_parameters_entry *dsc; + struct list_head node; +}; + #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 @@ -202,17 +210,12 @@ get_lvds_fp_timing(const struct bdb_header *bdb, return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs); } -/* Try to find integrated panel data */ +/* Parse general panel options */ static void -parse_lfp_panel_data(struct drm_i915_private *dev_priv, - const struct bdb_header *bdb) +parse_panel_options(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) { const struct bdb_lvds_options *lvds_options; - const struct bdb_lvds_lfp_data *lvds_lfp_data; - const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; - const struct lvds_dvo_timing *panel_dvo_timing; - const struct lvds_fp_timing *fp_timing; - struct drm_display_mode *panel_fixed_mode; int panel_type; int drrs_mode; int ret; @@ -261,6 +264,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("DRRS not supported (VBT input)\n"); break; } +} + +/* Try to find integrated panel timing data */ +static void +parse_lfp_panel_dtd(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) +{ + const struct bdb_lvds_lfp_data *lvds_lfp_data; + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; + const struct lvds_dvo_timing *panel_dvo_timing; + const struct lvds_fp_timing *fp_timing; + struct drm_display_mode *panel_fixed_mode; + int panel_type = dev_priv->vbt.panel_type; lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); if (!lvds_lfp_data) @@ -282,7 +298,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; - DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); + DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, @@ -300,6 +316,98 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, } static void +parse_generic_dtd(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) +{ + const struct bdb_generic_dtd *generic_dtd; + const struct generic_dtd_entry *dtd; + struct drm_display_mode *panel_fixed_mode; + int num_dtd; + + generic_dtd = find_section(bdb, BDB_GENERIC_DTD); + if (!generic_dtd) + return; + + if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) { + DRM_ERROR("GDTD size %u is too small.\n", + generic_dtd->gdtd_size); + return; + } else if (generic_dtd->gdtd_size != + sizeof(struct generic_dtd_entry)) { + DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size); + /* DTD has unknown fields, but keep going */ + } + + num_dtd = (get_blocksize(generic_dtd) - + sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size; + if (dev_priv->vbt.panel_type >= num_dtd) { + DRM_ERROR("Panel type %d not found in table of %d DTD's\n", + dev_priv->vbt.panel_type, num_dtd); + return; + } + + dtd = &generic_dtd->dtd[dev_priv->vbt.panel_type]; + + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); + if (!panel_fixed_mode) + return; + + panel_fixed_mode->hdisplay = dtd->hactive; + panel_fixed_mode->hsync_start = + panel_fixed_mode->hdisplay + dtd->hfront_porch; + panel_fixed_mode->hsync_end = + panel_fixed_mode->hsync_start + dtd->hsync; + panel_fixed_mode->htotal = panel_fixed_mode->hsync_end; + + panel_fixed_mode->vdisplay = dtd->vactive; + panel_fixed_mode->vsync_start = + panel_fixed_mode->vdisplay + 
dtd->vfront_porch; + panel_fixed_mode->vsync_end = + panel_fixed_mode->vsync_start + dtd->vsync; + panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end; + + panel_fixed_mode->clock = dtd->pixel_clock; + panel_fixed_mode->width_mm = dtd->width_mm; + panel_fixed_mode->height_mm = dtd->height_mm; + + panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(panel_fixed_mode); + + if (dtd->hsync_positive_polarity) + panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; + else + panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; + + if (dtd->vsync_positive_polarity) + panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; + else + panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + + DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n"); + drm_mode_debug_printmodeline(panel_fixed_mode); + + dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; +} + +static void +parse_panel_dtd(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) +{ + /* + * Older VBTs provided DTD information for internal displays + * through the "LFP panel DTD" block (42). As of VBT revision 229, + * that block is now deprecated and DTD information should be provided + * via a newer "generic DTD" block (58). Just to be safe, we'll + * try the new generic DTD block first on VBT >= 229, but still fall + * back to trying the old LFP block if that fails. + */ + if (bdb->version >= 229) + parse_generic_dtd(dev_priv, bdb); + if (!dev_priv->vbt.lfp_lvds_vbt_mode) + parse_lfp_panel_dtd(dev_priv, bdb); +} + +static void parse_lfp_backlight(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { @@ -449,8 +557,9 @@ static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version) { struct sdvo_device_mapping *mapping; + const struct display_device_data *devdata; const struct child_device_config *child; - int i, count = 0; + int count = 0; /* * Only parse SDVO mappings on gens that could have SDVO. This isn't @@ -461,8 +570,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version) return; } - for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { + child = &devdata->child; if (child->slave_addr != SLAVE_ADDR1 && child->slave_addr != SLAVE_ADDR2) { @@ -552,16 +661,45 @@ parse_driver_features(struct drm_i915_private *dev_priv, dev_priv->vbt.int_lvds_support = 0; } - DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled); + if (bdb->version < 228) { + DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled); + /* + * If DRRS is not supported, drrs_type has to be set to 0. + * This is because VBT is configured in such a way that + * static DRRS is 0 and DRRS not supported is represented by + * driver->drrs_enabled=false + */ + if (!driver->drrs_enabled) + dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED; + + dev_priv->vbt.psr.enable = driver->psr_enabled; + } +} + +static void +parse_power_conservation_features(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) +{ + const struct bdb_lfp_power *power; + u8 panel_type = dev_priv->vbt.panel_type; + + if (bdb->version < 228) + return; + + power = find_section(bdb, BDB_LVDS_POWER); + if (!power) + return; + + dev_priv->vbt.psr.enable = power->psr & BIT(panel_type); + /* * If DRRS is not supported, drrs_type has to be set to 0. 
* This is because VBT is configured in such a way that * static DRRS is 0 and DRRS not supported is represented by - driver->drrs_enabled=false + power->drrs & BIT(panel_type)=false */ - if (!driver->drrs_enabled) + if (!(power->drrs & BIT(panel_type))) dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED; - dev_priv->vbt.psr.enable = driver->psr_enabled; } static void @@ -1230,6 +1368,57 @@ err: memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence)); } +static void +parse_compression_parameters(struct drm_i915_private *i915, + const struct bdb_header *bdb) +{ + const struct bdb_compression_parameters *params; + struct display_device_data *devdata; + const struct child_device_config *child; + u16 block_size; + int index; + + if (bdb->version < 198) + return; + + params = find_section(bdb, BDB_COMPRESSION_PARAMETERS); + if (params) { + /* Sanity checks */ + if (params->entry_size != sizeof(params->data[0])) { + DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n"); + return; + } + + block_size = get_blocksize(params); + if (block_size < sizeof(*params)) { + DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n"); + return; + } + } + + list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + child = &devdata->child; + + if (!child->compression_enable) + continue; + + if (!params) { + DRM_DEBUG_KMS("VBT: compression params not available\n"); + continue; + } + + if (child->compression_method_cps) { + DRM_DEBUG_KMS("VBT: CPS compression not supported\n"); + continue; + } + + index = child->compression_structure_index; + + devdata->dsc = kmemdup(&params->data[index], + sizeof(*devdata->dsc), GFP_KERNEL); + } +} + static u8 translate_iboost(u8 val) { static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */ @@ -1246,7 +1435,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) const struct ddi_vbt_port_info *info; enum port port; - for (port = PORT_A; port < I915_MAX_PORTS; port++) { + for_each_port(port) { info = &i915->vbt.ddi_port_info[port]; if (info->child && ddc_pin == info->alternate_ddc_pin) @@ -1297,7 +1486,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) const struct ddi_vbt_port_info *info; enum port port; - for (port = PORT_A; port < I915_MAX_PORTS; port++) { + for_each_port(port) { info = &i915->vbt.ddi_port_info[port]; if (info->child && aux_ch == info->alternate_aux_channel) @@ -1418,9 +1607,10 @@ static enum port dvo_port_to_port(u8 dvo_port) } static void parse_ddi_port(struct drm_i915_private *dev_priv, - const struct child_device_config *child, + struct display_device_data *devdata, u8 bdb_version) { + const struct child_device_config *child = &devdata->child; struct ddi_vbt_port_info *info; bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; enum port port; @@ -1443,7 +1633,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); - if (port == PORT_A && is_dvi) { + if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) { DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n", is_hdmi ? 
"/HDMI" : ""); is_dvi = false; @@ -1461,26 +1651,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, if (bdb_version >= 209) info->supports_tbt = child->tbt; - DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n", + DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, HAS_LSPCON(dev_priv) && child->lspcon, - info->supports_typec_usb, info->supports_tbt); - - if (is_edp && is_dvi) - DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n", - port_name(port)); - if (is_crt && port != PORT_E) - DRM_DEBUG_KMS("Port %c is analog\n", port_name(port)); - if (is_crt && (is_dvi || is_dp)) - DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n", - port_name(port)); - if (is_dvi && (port == PORT_A || port == PORT_E)) - DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port)); - if (!is_dvi && !is_dp && !is_crt) - DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n", - port_name(port)); - if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E)) - DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); + info->supports_typec_usb, info->supports_tbt, + devdata->dsc != NULL); if (is_dvi) { u8 ddc_pin; @@ -1509,6 +1684,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, port_name(port), hdmi_level_shift); info->hdmi_level_shift = hdmi_level_shift; + info->hdmi_level_shift_set = true; } if (bdb_version >= 204) { @@ -1571,8 +1747,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version) { - const struct child_device_config *child; - int i; + struct display_device_data *devdata; if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) return; @@ -1580,11 +1755,8 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version) if (bdb_version < 155) return; - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; - - parse_ddi_port(dev_priv, child, bdb_version); - } + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) + parse_ddi_port(dev_priv, devdata, bdb_version); } static void @@ -1592,8 +1764,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { const struct bdb_general_definitions *defs; + struct display_device_data *devdata; const struct child_device_config *child; - int i, child_device_num, count; + int i, child_device_num; u8 expected_size; u16 block_size; int bus_pin; @@ -1649,26 +1822,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, /* get the number of child device */ child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size; - count = 0; - /* get the number of child device that is present */ - for (i = 0; i < child_device_num; i++) { - child = child_device_ptr(defs, i); - if (!child->device_type) - continue; - count++; - } - if (!count) { - DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); - return; - } - dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL); - if (!dev_priv->vbt.child_dev) { - DRM_DEBUG_KMS("No memory space for child device\n"); - return; - } - dev_priv->vbt.child_dev_num = count; - count = 0; for (i = 0; i < child_device_num; i++) { child = child_device_ptr(defs, i); if (!child->device_type) @@ -1677,23 +1831,29 @@ parse_general_definitions(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("Found VBT child device with 
type 0x%x\n", child->device_type); + devdata = kzalloc(sizeof(*devdata), GFP_KERNEL); + if (!devdata) + break; + /* * Copy as much as we know (sizeof) and is available - * (child_dev_size) of the child device. Accessing the data must - * depend on VBT version. + * (child_dev_size) of the child device config. Accessing the + * data must depend on VBT version. */ - memcpy(dev_priv->vbt.child_dev + count, child, + memcpy(&devdata->child, child, min_t(size_t, defs->child_dev_size, sizeof(*child))); - count++; + + list_add_tail(&devdata->node, &dev_priv->vbt.display_devices); } + + if (list_empty(&dev_priv->vbt.display_devices)) + DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); } /* Common defaults which may be overridden by VBT. */ static void init_vbt_defaults(struct drm_i915_private *dev_priv) { - enum port port; - dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; /* Default to having backlight */ @@ -1721,13 +1881,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv, !HAS_PCH_SPLIT(dev_priv)); DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); - - for (port = PORT_A; port < I915_MAX_PORTS; port++) { - struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[port]; - - info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN; - } } /* Defaults to initialize only if there is no VBT. */ @@ -1736,7 +1889,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) { enum port port; - for (port = PORT_A; port < I915_MAX_PORTS; port++) { + for_each_port(port) { struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; enum phy phy = intel_port_to_phy(dev_priv, port); @@ -1787,6 +1940,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) return false; } + if (vbt->vbt_size > size) { + DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n"); + return false; + } + + size = vbt->vbt_size; + if (range_overflows_t(size_t, vbt->bdb_offset, sizeof(struct bdb_header), @@ -1804,28 +1964,61 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) return vbt; } -static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) +static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv) { - size_t i; + struct pci_dev *pdev = dev_priv->drm.pdev; + void __iomem *p = NULL, *oprom; + struct vbt_header *vbt; + u16 vbt_size; + size_t i, size; - /* Scour memory looking for the VBT signature. */ - for (i = 0; i + 4 < size; i++) { - void *vbt; + oprom = pci_map_rom(pdev, &size); + if (!oprom) + return NULL; - if (ioread32(bios + i) != *((const u32 *) "$VBT")) + /* Scour memory looking for the VBT signature. */ + for (i = 0; i + 4 < size; i += 4) { + if (ioread32(oprom + i) != *((const u32 *)"$VBT")) continue; - /* - * This is the one place where we explicitly discard the address - * space (__iomem) of the BIOS/VBT. 
- */ - vbt = (void __force *) bios + i; - if (intel_bios_is_valid_vbt(vbt, size - i)) - return vbt; - + p = oprom + i; + size -= i; break; } + if (!p) + goto err_unmap_oprom; + + if (sizeof(struct vbt_header) > size) { + DRM_DEBUG_DRIVER("VBT header incomplete\n"); + goto err_unmap_oprom; + } + + vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size)); + if (vbt_size > size) { + DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n"); + goto err_unmap_oprom; + } + + /* The rest will be validated by intel_bios_is_valid_vbt() */ + vbt = kmalloc(vbt_size, GFP_KERNEL); + if (!vbt) + goto err_unmap_oprom; + + memcpy_fromio(vbt, p, vbt_size); + + if (!intel_bios_is_valid_vbt(vbt, vbt_size)) + goto err_free_vbt; + + pci_unmap_rom(pdev, oprom); + + return vbt; + +err_free_vbt: + kfree(vbt); +err_unmap_oprom: + pci_unmap_rom(pdev, oprom); + return NULL; } @@ -1839,10 +2032,11 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) */ void intel_bios_init(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; const struct vbt_header *vbt = dev_priv->opregion.vbt; + struct vbt_header *oprom_vbt = NULL; const struct bdb_header *bdb; - u8 __iomem *bios = NULL; + + INIT_LIST_HEAD(&dev_priv->vbt.display_devices); if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) { DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); @@ -1853,15 +2047,11 @@ void intel_bios_init(struct drm_i915_private *dev_priv) /* If the OpRegion does not have VBT, look in PCI ROM. */ if (!vbt) { - size_t size; - - bios = pci_map_rom(pdev, &size); - if (!bios) + oprom_vbt = oprom_get_vbt(dev_priv); + if (!oprom_vbt) goto out; - vbt = find_vbt(bios, size); - if (!vbt) - goto out; + vbt = oprom_vbt; DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n"); } @@ -1874,15 +2064,20 @@ void intel_bios_init(struct drm_i915_private *dev_priv) /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); parse_general_definitions(dev_priv, bdb); - parse_lfp_panel_data(dev_priv, bdb); + parse_panel_options(dev_priv, bdb); + parse_panel_dtd(dev_priv, bdb); parse_lfp_backlight(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_driver_features(dev_priv, bdb); + parse_power_conservation_features(dev_priv, bdb); parse_edp(dev_priv, bdb); parse_psr(dev_priv, bdb); parse_mipi_config(dev_priv, bdb); parse_mipi_sequence(dev_priv, bdb); + /* Depends on child device list */ + parse_compression_parameters(dev_priv, bdb); + /* Further processing on pre-parsed data */ parse_sdvo_device_mapping(dev_priv, bdb->version); parse_ddi_ports(dev_priv, bdb->version); @@ -1893,8 +2088,7 @@ out: init_vbt_missing_defaults(dev_priv); } - if (bios) - pci_unmap_rom(pdev, bios); + kfree(oprom_vbt); } /** @@ -1903,9 +2097,14 @@ out: */ void intel_bios_driver_remove(struct drm_i915_private *dev_priv) { - kfree(dev_priv->vbt.child_dev); - dev_priv->vbt.child_dev = NULL; - dev_priv->vbt.child_dev_num = 0; + struct display_device_data *devdata, *n; + + list_for_each_entry_safe(devdata, n, &dev_priv->vbt.display_devices, node) { + list_del(&devdata->node); + kfree(devdata->dsc); + kfree(devdata); + } + kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; kfree(dev_priv->vbt.lfp_lvds_vbt_mode); @@ -1929,17 +2128,18 @@ void intel_bios_driver_remove(struct drm_i915_private *dev_priv) */ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv) { + const struct display_device_data *devdata; const struct child_device_config *child; - int i; if 
(!dev_priv->vbt.int_tv_support) return false; - if (!dev_priv->vbt.child_dev_num) + if (list_empty(&dev_priv->vbt.display_devices)) return true; - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { + child = &devdata->child; + /* * If the device type is not TV, continue. */ @@ -1971,14 +2171,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv) */ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) { + const struct display_device_data *devdata; const struct child_device_config *child; - int i; - if (!dev_priv->vbt.child_dev_num) + if (list_empty(&dev_priv->vbt.display_devices)) return true; - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { + child = &devdata->child; /* If the device type is not LFP, continue. * We have to check both the new identifiers as well as the @@ -2020,6 +2220,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) */ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) { + const struct display_device_data *devdata; const struct child_device_config *child; static const struct { u16 dp, hdmi; @@ -2030,7 +2231,6 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, }; - int i; if (HAS_DDI(dev_priv)) { const struct ddi_vbt_port_info *port_info = @@ -2045,11 +2245,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) return false; - if (!dev_priv->vbt.child_dev_num) - return false; - - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { + child = &devdata->child; if ((child->dvo_port == port_mapping[port].dp || child->dvo_port == port_mapping[port].hdmi) && @@ -2070,6 +2267,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por */ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) { + const struct display_device_data *devdata; const struct child_device_config *child; static const short port_mapping[] = { [PORT_B] = DVO_PORT_DPB, @@ -2078,16 +2276,12 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) [PORT_E] = DVO_PORT_DPE, [PORT_F] = DVO_PORT_DPF, }; - int i; if (HAS_DDI(dev_priv)) return dev_priv->vbt.ddi_port_info[port].supports_edp; - if (!dev_priv->vbt.child_dev_num) - return false; - - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { + child = &devdata->child; if (child->dvo_port == port_mapping[port] && (child->device_type & DEVICE_TYPE_eDP_BITS) == @@ -2136,13 +2330,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) { - const struct child_device_config *child; - int i; - - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + const struct display_device_data *devdata; - if (child_dev_is_dp_dual_mode(child, port)) + list_for_each_entry(devdata, 
&dev_priv->vbt.display_devices, node) { + if (child_dev_is_dp_dual_mode(&devdata->child, port)) return true; } @@ -2159,12 +2350,12 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port) { + const struct display_device_data *devdata; const struct child_device_config *child; u8 dvo_port; - int i; - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; + list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { + child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; @@ -2188,6 +2379,104 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, return false; } +static void fill_dsc(struct intel_crtc_state *crtc_state, + struct dsc_compression_parameters_entry *dsc, + int dsc_max_bpc) +{ + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + int bpc = 8; + + vdsc_cfg->dsc_version_major = dsc->version_major; + vdsc_cfg->dsc_version_minor = dsc->version_minor; + + if (dsc->support_12bpc && dsc_max_bpc >= 12) + bpc = 12; + else if (dsc->support_10bpc && dsc_max_bpc >= 10) + bpc = 10; + else if (dsc->support_8bpc && dsc_max_bpc >= 8) + bpc = 8; + else + DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DSC\n", + dsc_max_bpc); + + crtc_state->pipe_bpp = bpc * 3; + + crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp, + VBT_DSC_MAX_BPP(dsc->max_bpp)); + + /* + * FIXME: This is ugly, and slice count should take DSC engine + * throughput etc. into account. + * + * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices. + */ + if (dsc->slices_per_line & BIT(2)) { + crtc_state->dsc.slice_count = 4; + } else if (dsc->slices_per_line & BIT(1)) { + crtc_state->dsc.slice_count = 2; + } else { + /* FIXME */ + if (!(dsc->slices_per_line & BIT(0))) + DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n"); + + crtc_state->dsc.slice_count = 1; + } + + if (crtc_state->hw.adjusted_mode.crtc_hdisplay % + crtc_state->dsc.slice_count != 0) + DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n", + crtc_state->hw.adjusted_mode.crtc_hdisplay, + crtc_state->dsc.slice_count); + + /* + * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the + * implementation specific physical rate buffer size. Currently we use + * the required rate buffer model size calculated in + * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E. + * + * The VBT rc_buffer_block_size and rc_buffer_size definitions + * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC + * implementation should also use the DPCD (or perhaps VBT for eDP) + * provided value for the buffer size. 
+ */ + + /* FIXME: DSI spec says bpc + 1 for this one */ + vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth); + + vdsc_cfg->block_pred_enable = dsc->block_prediction_enable; + + vdsc_cfg->slice_height = dsc->slice_height; +} + +/* FIXME: initially DSI specific */ +bool intel_bios_get_dsc_params(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + int dsc_max_bpc) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct display_device_data *devdata; + const struct child_device_config *child; + + list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + child = &devdata->child; + + if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) + continue; + + if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) { + if (!devdata->dsc) + return false; + + if (crtc_state) + fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc); + + return true; + } + } + + return false; +} + /** * intel_bios_is_port_hpd_inverted - is HPD inverted for %port * @i915: i915 device instance diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 98f064828a57..d6a0c29d37ac 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -35,6 +35,8 @@ #include <drm/i915_drm.h> struct drm_i915_private; +struct intel_crtc_state; +struct intel_encoder; enum port; enum intel_backlight_type { @@ -242,5 +244,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, enum port port); enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); +bool intel_bios_get_dsc_params(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + int dsc_max_bpc); #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 22e83f857de8..dcb66a33be9b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -15,7 +15,7 @@ struct intel_qgv_point { }; struct intel_qgv_info { - struct intel_qgv_point points[3]; + struct intel_qgv_point points[I915_NUM_QGV_POINTS]; u8 num_points; u8 num_channels; u8 t_bl; @@ -264,6 +264,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, void intel_bw_init_hw(struct drm_i915_private *dev_priv) { + if (!HAS_DISPLAY(dev_priv)) + return; + if (IS_GEN(dev_priv, 12)) icl_get_bw_info(dev_priv, &tgl_sa_info); else if (IS_GEN(dev_priv, 11)) @@ -273,17 +276,29 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv, int num_planes) { - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) { + /* + * Any bw group has the same number of QGV points + */ + const struct intel_bw_info *bi = + &dev_priv->max_bw[0]; + unsigned int min_bw = UINT_MAX; + int i; + /* * FIXME with SAGV disabled maybe we can assume * point 1 will always be used? Seems to match * the behaviour observed in the wild. 
*/ - return min3(icl_max_bw(dev_priv, num_planes, 0), - icl_max_bw(dev_priv, num_planes, 1), - icl_max_bw(dev_priv, num_planes, 2)); - else + for (i = 0; i < bi->num_qgv_points; i++) { + unsigned int bw = icl_max_bw(dev_priv, num_planes, i); + + min_bw = min(bw, min_bw); + } + return min_bw; + } else { return UINT_MAX; + } } static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state) @@ -297,7 +312,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); unsigned int data_rate = 0; enum plane_id plane_id; @@ -318,7 +333,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_ void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); bw_state->data_rate[crtc->pipe] = intel_bw_crtc_data_rate(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 3d867963a6d1..7d1ab1e5b7c3 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1273,7 +1273,9 @@ static u8 icl_calc_voltage_level(int cdclk) static u8 ehl_calc_voltage_level(int cdclk) { - if (cdclk > 312000) + if (cdclk > 326400) + return 3; + else if (cdclk > 312000) return 2; else if (cdclk > 180000) return 1; @@ -1902,7 +1904,7 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int pixel_rate = crtc_state->pixel_rate; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -1918,13 +1920,26 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) return DIV_ROUND_UP(pixel_rate * 100, 90); } +static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane; + int min_cdclk = 0; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); + + return min_cdclk; +} + int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = - to_i915(crtc_state->base.crtc->dev); + to_i915(crtc_state->uapi.crtc->dev); int min_cdclk; - if (!crtc_state->base.enable) + if (!crtc_state->hw.enable) return 0; min_cdclk = intel_pixel_rate_to_cdclk(crtc_state); @@ -1986,6 +2001,9 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) IS_GEMINILAKE(dev_priv)) min_cdclk = max(158400, min_cdclk); + /* Account for additional needs from the planes */ + min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); + if (min_cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->max_cdclk_freq); @@ -2007,11 +2025,20 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) sizeof(state->min_cdclk)); 
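/*
 * [Editor's sketch -- illustrative only, not part of the patch.] The loop
 * that follows is one instance of a pattern this series introduces for
 * cross-pipe ("global") state: recompute the per-pipe value, and only take
 * the global state lock when the cached value actually changes, so commits
 * that leave it untouched are not serialized against each other. With
 * hypothetical names (compute_per_pipe_value(), state->cached[]), the
 * shape is roughly:
 *
 *	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 *		int val = compute_per_pipe_value(crtc_state);
 *
 *		if (state->cached[i] == val)
 *			continue;
 *
 *		state->cached[i] = val;
 *
 *		ret = intel_atomic_lock_global_state(state);
 *		if (ret)
 *			return ret;
 *	}
 */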
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + int ret; + min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); if (min_cdclk < 0) return min_cdclk; + if (state->min_cdclk[i] == min_cdclk) + continue; + state->min_cdclk[i] = min_cdclk; + + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; } min_cdclk = state->cdclk.force_min_cdclk; @@ -2034,7 +2061,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) * future platforms this code will need to be * adjusted. */ -static u8 bxt_compute_min_voltage_level(struct intel_atomic_state *state) +static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; @@ -2047,11 +2074,21 @@ static u8 bxt_compute_min_voltage_level(struct intel_atomic_state *state) sizeof(state->min_voltage_level)); for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - if (crtc_state->base.enable) - state->min_voltage_level[i] = - crtc_state->min_voltage_level; + int ret; + + if (crtc_state->hw.enable) + min_voltage_level = crtc_state->min_voltage_level; else - state->min_voltage_level[i] = 0; + min_voltage_level = 0; + + if (state->min_voltage_level[i] == min_voltage_level) + continue; + + state->min_voltage_level[i] = min_voltage_level; + + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; } min_voltage_level = 0; @@ -2133,7 +2170,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state) vco = dev_priv->skl_preferred_vco_freq; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - if (!crtc_state->base.enable) + if (!crtc_state->hw.enable) continue; if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) @@ -2195,20 +2232,24 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int min_cdclk, cdclk, vco; + int min_cdclk, min_voltage_level, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); if (min_cdclk < 0) return min_cdclk; + min_voltage_level = bxt_compute_min_voltage_level(state); + if (min_voltage_level < 0) + return min_voltage_level; + cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = - max(dev_priv->display.calc_voltage_level(cdclk), - bxt_compute_min_voltage_level(state)); + max_t(int, min_voltage_level, + dev_priv->display.calc_voltage_level(cdclk)); if (!state->active_pipes) { cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); @@ -2225,23 +2266,6 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) return 0; } -static int intel_lock_all_pipes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - /* Add all pipes to the state */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - } - - return 0; -} - static int intel_modeset_all_pipes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); @@ -2259,11 +2283,11 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state) if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - if (!crtc_state->base.active || 
- drm_atomic_crtc_needs_modeset(&crtc_state->base)) + if (!crtc_state->hw.active || + drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) continue; - crtc_state->base.mode_changed = true; + crtc_state->uapi.mode_changed = true; ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); @@ -2308,48 +2332,63 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) return ret; /* - * Writes to dev_priv->cdclk.logical must protected by - * holding all the crtc locks, even if we don't end up + * Writes to dev_priv->cdclk.{actual,logical} must be protected + * by holding all the crtc mutexes even if we don't end up * touching the hardware */ - if (intel_cdclk_changed(&dev_priv->cdclk.logical, - &state->cdclk.logical)) { - ret = intel_lock_all_pipes(state); - if (ret < 0) + if (intel_cdclk_changed(&dev_priv->cdclk.actual, - &state->cdclk.actual)) { + /* + * Also serialize commits across all crtcs + * if the actual hw needs to be poked. + */ + ret = intel_atomic_serialize_global_state(state); + if (ret) return ret; + } else if (intel_cdclk_changed(&dev_priv->cdclk.logical, + &state->cdclk.logical)) { + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; + } else { + return 0; } - if (is_power_of_2(state->active_pipes)) { + if (is_power_of_2(state->active_pipes) && + intel_cdclk_needs_cd2x_update(dev_priv, + &dev_priv->cdclk.actual, + &state->cdclk.actual)) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; pipe = ilog2(state->active_pipes); crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (crtc_state && - drm_atomic_crtc_needs_modeset(&crtc_state->base)) + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) pipe = INVALID_PIPE; } else { pipe = INVALID_PIPE; } - /* All pipes must be switched off while we change the cdclk. */ - if (pipe != INVALID_PIPE && - intel_cdclk_needs_cd2x_update(dev_priv, - &dev_priv->cdclk.actual, - &state->cdclk.actual)) { - ret = intel_lock_all_pipes(state); - if (ret) - return ret; - + if (pipe != INVALID_PIPE) { state->cdclk.pipe = pipe; + + DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n", + pipe_name(pipe)); } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, &state->cdclk.actual)) { /* All pipes must be switched off while we change the cdclk. 
*/ ret = intel_modeset_all_pipes(state); if (ret) return ret; state->cdclk.pipe = INVALID_PIPE; + + DRM_DEBUG_KMS("Modeset required for cdclk change\n"); } DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index fa44eb73d088..3980e8b50c28 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -117,10 +117,10 @@ static bool lut_is_legacy(const struct drm_property_blob *lut) static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state) { - return !crtc_state->base.degamma_lut && - !crtc_state->base.ctm && - crtc_state->base.gamma_lut && - lut_is_legacy(crtc_state->base.gamma_lut); + return !crtc_state->hw.degamma_lut && + !crtc_state->hw.ctm && + crtc_state->hw.gamma_lut && + lut_is_legacy(crtc_state->hw.gamma_lut); } /* @@ -205,7 +205,7 @@ static void icl_update_output_csc(struct intel_crtc *crtc, static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); /* * FIXME if there's a gamma LUT after the CSC, we should @@ -219,7 +219,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, u16 coeffs[9]) { - const struct drm_color_ctm *ctm = crtc_state->base.ctm->data; + const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; const u64 *input; u64 temp[9]; int i; @@ -270,11 +270,11 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); bool limited_color_range = ilk_csc_limited_range(crtc_state); - if (crtc_state->base.ctm) { + if (crtc_state->hw.ctm) { u16 coeff[9]; ilk_csc_convert_ctm(crtc_state, coeff); @@ -309,10 +309,10 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (crtc_state->base.ctm) { + if (crtc_state->hw.ctm) { u16 coeff[9]; ilk_csc_convert_ctm(crtc_state, coeff); @@ -338,12 +338,12 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) */ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (crtc_state->base.ctm) { - const struct drm_color_ctm *ctm = crtc_state->base.ctm->data; + if (crtc_state->hw.ctm) { + const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; u16 coeffs[9] = {}; int i; @@ -404,7 +404,7 @@ static u32 ilk_lut_10(const struct drm_color_lut *color) static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state, const struct drm_property_blob *blob) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + 
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; int i; @@ -435,12 +435,12 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state, static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) { - i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut); + i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut); } static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val; @@ -453,7 +453,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) static void ilk_color_commit(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val; @@ -468,7 +468,7 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state) static void hsw_color_commit(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); @@ -478,7 +478,7 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state) static void skl_color_commit(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val = 0; @@ -524,8 +524,8 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, static void i965_load_luts(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) i9xx_load_luts(crtc_state); @@ -547,8 +547,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc, static void ilk_load_luts(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) i9xx_load_luts(crtc_state); @@ -654,9 +654,9 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc) static void ivb_load_luts(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + const struct drm_property_blob *degamma_lut = 
crtc_state->hw.degamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { i9xx_load_luts(crtc_state); @@ -677,9 +677,9 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state) static void bdw_load_luts(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { i9xx_load_luts(crtc_state); @@ -700,11 +700,11 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; - const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data; + const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data; u32 i; /* @@ -739,7 +739,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; @@ -766,8 +766,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_stat static void glk_load_luts(const struct intel_crtc_state *crtc_state) { - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); /* * On GLK+ both pipe CSC and degamma LUT are controlled @@ -777,7 +777,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state) * the degamma LUT so that we don't have to reload * it every time the pipe CSC is being enabled. 
*/ - if (crtc_state->base.degamma_lut) + if (crtc_state->hw.degamma_lut) glk_load_degamma_lut(crtc_state); else glk_load_degamma_lut_linear(crtc_state); @@ -808,7 +808,7 @@ static void icl_load_gcmax(const struct intel_crtc_state *crtc_state, const struct drm_color_lut *color) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; @@ -822,8 +822,8 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state, static void icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *blob = crtc_state->base.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *blob = crtc_state->hw.gamma_lut; const struct drm_color_lut *lut = blob->data; struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; @@ -854,8 +854,8 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) static void icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *blob = crtc_state->base.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *blob = crtc_state->hw.gamma_lut; const struct drm_color_lut *lut = blob->data; const struct drm_color_lut *entry; struct intel_dsb *dsb = intel_dsb_get(crtc); @@ -910,11 +910,11 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) static void icl_load_luts(const struct intel_crtc_state *crtc_state) { - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsb *dsb = intel_dsb_get(crtc); - if (crtc_state->base.degamma_lut) + if (crtc_state->hw.degamma_lut) glk_load_degamma_lut(crtc_state); switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { @@ -990,9 +990,9 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc, static void chv_load_luts(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; cherryview_load_csc_matrix(crtc_state); @@ -1010,28 +1010,77 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); dev_priv->display.load_luts(crtc_state); } void intel_color_commit(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); dev_priv->display.color_commit(crtc_state); } +static bool 
intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->uapi.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + return !old_crtc_state->hw.gamma_lut && + !old_crtc_state->hw.degamma_lut; +} + +static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->uapi.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + /* + * CGM_PIPE_MODE is itself single buffered. We'd have to + * somehow split it out from chv_load_luts() if we wanted + * the ability to preload the CGM LUTs/CSC without tearing. + */ + if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode) + return false; + + return !old_crtc_state->hw.gamma_lut; +} + +static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->uapi.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + /* + * The hardware degamma is active whenever the pipe + * CSC is active. Thus even if the old state has no + * software degamma we need to avoid clobbering the + * linear hardware degamma mid scanout. + */ + return !old_crtc_state->csc_enable && + !old_crtc_state->hw.gamma_lut; +} + int intel_color_check(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); return dev_priv->display.color_check(crtc_state); } void intel_color_get_config(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (dev_priv->display.read_luts) dev_priv->display.read_luts(crtc_state); @@ -1055,16 +1104,16 @@ static bool need_plane_update(struct intel_plane *plane, static int intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = - to_intel_atomic_state(new_crtc_state->base.state); + to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_plane *plane; - if (!new_crtc_state->base.active || - drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) + if (!new_crtc_state->hw.active || + drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) return 0; if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable && @@ -1106,9 +1155,9 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected) static int check_luts(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; - const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + struct 
drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; int gamma_length, degamma_length; u32 gamma_tests, degamma_tests; @@ -1156,7 +1205,7 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state) return ret; crtc_state->gamma_enable = - crtc_state->base.gamma_lut && + crtc_state->hw.gamma_lut && !crtc_state->c8_planes; crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state); @@ -1165,6 +1214,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1175,11 +1226,11 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state) if (crtc_state_is_legacy_gamma(crtc_state)) return 0; - if (crtc_state->base.degamma_lut) + if (crtc_state->hw.degamma_lut) cgm_mode |= CGM_PIPE_MODE_DEGAMMA; - if (crtc_state->base.ctm) + if (crtc_state->hw.ctm) cgm_mode |= CGM_PIPE_MODE_CSC; - if (crtc_state->base.gamma_lut) + if (crtc_state->hw.gamma_lut) cgm_mode |= CGM_PIPE_MODE_GAMMA; return cgm_mode; @@ -1217,6 +1268,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = chv_can_preload_luts(crtc_state); + return 0; } @@ -1253,7 +1306,7 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state) return ret; crtc_state->gamma_enable = - crtc_state->base.gamma_lut && + crtc_state->hw.gamma_lut && !crtc_state->c8_planes; /* @@ -1271,6 +1324,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1279,8 +1334,8 @@ static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state) if (!crtc_state->gamma_enable || crtc_state_is_legacy_gamma(crtc_state)) return GAMMA_MODE_MODE_8BIT; - else if (crtc_state->base.gamma_lut && - crtc_state->base.degamma_lut) + else if (crtc_state->hw.gamma_lut && + crtc_state->hw.degamma_lut) return GAMMA_MODE_MODE_SPLIT; else return GAMMA_MODE_MODE_10BIT; @@ -1294,7 +1349,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state) * CSC comes after the LUT in degamma, RGB->YCbCr, * and RGB full->limited range mode. 
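
The ivb_gamma_mode() change just above reduces to a small decision table: 8-bit mode when gamma is off or a legacy LUT is in use, split mode when both a gamma and a degamma LUT are attached (the shared hardware LUT is halved between the two curves), 10-bit otherwise. A standalone sketch of that table, with simplified types in place of the kernel's:

    #include <stdbool.h>

    enum gamma_mode { GAMMA_8BIT, GAMMA_10BIT, GAMMA_SPLIT };

    struct color_state {
        bool gamma_enable;
        bool legacy_gamma;   /* stands in for crtc_state_is_legacy_gamma() */
        bool gamma_lut;
        bool degamma_lut;
    };

    enum gamma_mode ivb_pick_gamma_mode(const struct color_state *s)
    {
        if (!s->gamma_enable || s->legacy_gamma)
            return GAMMA_8BIT;
        if (s->gamma_lut && s->degamma_lut)
            return GAMMA_SPLIT;  /* half the LUT for each curve */
        return GAMMA_10BIT;
    }
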
*/ - if (crtc_state->base.degamma_lut || + if (crtc_state->hw.degamma_lut || crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || limited_color_range) return 0; @@ -1312,13 +1367,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state) return ret; crtc_state->gamma_enable = - (crtc_state->base.gamma_lut || - crtc_state->base.degamma_lut) && + (crtc_state->hw.gamma_lut || + crtc_state->hw.degamma_lut) && !crtc_state->c8_planes; crtc_state->csc_enable = crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || - crtc_state->base.ctm || limited_color_range; + crtc_state->hw.ctm || limited_color_range; crtc_state->gamma_mode = ivb_gamma_mode(crtc_state); @@ -1328,6 +1383,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1349,14 +1406,14 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) return ret; crtc_state->gamma_enable = - crtc_state->base.gamma_lut && + crtc_state->hw.gamma_lut && !crtc_state->c8_planes; /* On GLK+ degamma LUT is controlled by csc_enable */ crtc_state->csc_enable = - crtc_state->base.degamma_lut || + crtc_state->hw.degamma_lut || crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || - crtc_state->base.ctm || crtc_state->limited_color_range; + crtc_state->hw.ctm || crtc_state->limited_color_range; crtc_state->gamma_mode = glk_gamma_mode(crtc_state); @@ -1366,6 +1423,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = glk_can_preload_luts(crtc_state); + return 0; } @@ -1373,14 +1432,14 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { u32 gamma_mode = 0; - if (crtc_state->base.degamma_lut) + if (crtc_state->hw.degamma_lut) gamma_mode |= PRE_CSC_GAMMA_ENABLE; - if (crtc_state->base.gamma_lut && + if (crtc_state->hw.gamma_lut && !crtc_state->c8_planes) gamma_mode |= POST_CSC_GAMMA_ENABLE; - if (!crtc_state->base.gamma_lut || + if (!crtc_state->hw.gamma_lut || crtc_state_is_legacy_gamma(crtc_state)) gamma_mode |= GAMMA_MODE_MODE_8BIT; else @@ -1393,7 +1452,7 @@ static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state) { u32 csc_mode = 0; - if (crtc_state->base.ctm) + if (crtc_state->hw.ctm) csc_mode |= ICL_CSC_ENABLE; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || @@ -1415,6 +1474,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) crtc_state->csc_mode = icl_csc_mode(crtc_state); + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1479,7 +1540,7 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (HAS_GMCH(dev_priv)) { @@ -1585,7 +1646,7 @@ static u32 intel_color_lut_pack(u32 val, u32 bit_precision) static struct drm_property_blob * i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; @@ -1622,13 +1683,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state) if (!crtc_state->gamma_enable) return; 
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); } static struct drm_property_blob * i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; @@ -1672,15 +1733,15 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); else - crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state); + crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state); } static struct drm_property_blob * chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; @@ -1714,7 +1775,7 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) static void chv_read_luts(struct intel_crtc_state *crtc_state) { if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) - crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state); + crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state); else i965_read_luts(crtc_state); } @@ -1722,7 +1783,7 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state) static struct drm_property_blob * ilk_read_lut_10(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; @@ -1761,15 +1822,15 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); else - crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state); } static struct drm_property_blob * glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int hw_lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; @@ -1810,9 +1871,9 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); else - crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); + crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); } void intel_color_init(struct intel_crtc *crtc) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index ff6126ea793c..b2b1336ecdb6 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ 
b/drivers/gpu/drm/i915/display/intel_crt.c @@ -132,9 +132,9 @@ static void intel_crt_get_config(struct intel_encoder *encoder, { pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); - pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); + pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); - pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; + pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; } static void hsw_crt_get_config(struct intel_encoder *encoder, @@ -144,13 +144,13 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, intel_ddi_get_config(encoder, pipe_config); - pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | + pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC); - pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); + pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); - pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); + pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } /* Note: The caller is required to filter out dpms modes not supported by the @@ -161,8 +161,8 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt *crt = intel_encoder_to_crt(encoder); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 adpa; if (INTEL_GEN(dev_priv) >= 5) @@ -241,6 +241,14 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + intel_crtc_vblank_off(old_crtc_state); + + intel_disable_pipe(old_crtc_state); + + intel_ddi_disable_transcoder_func(old_crtc_state); + + ironlake_pfit_disable(old_crtc_state); + intel_ddi_disable_pipe_clock(old_crtc_state); pch_post_disable_crt(encoder, old_crtc_state, old_conn_state); @@ -271,14 +279,14 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; WARN_ON(!crtc_state->has_pch_encoder); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - dev_priv->display.fdi_link_train(crtc, crtc_state); + hsw_fdi_link_train(encoder, crtc_state); intel_ddi_enable_pipe_clock(crtc_state); } @@ -288,7 +296,7 @@ static void hsw_enable_crt(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; WARN_ON(!crtc_state->has_pch_encoder); @@ -358,7 +366,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -373,7 +381,7 @@ static int 
pch_crt_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -390,7 +398,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -844,7 +852,7 @@ load_detect: } /* for pre-945g platforms use load detect */ - ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx); + ret = intel_get_load_detect_pipe(connector, &tmp, ctx); if (ret > 0) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; @@ -864,6 +872,13 @@ load_detect: out: intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); + + /* + * Make sure the refs for power wells enabled during detect are + * dropped to avoid a new detect cycle triggered by HPD polling. + */ + intel_display_power_flush_work(dev_priv); + return status; } @@ -994,9 +1009,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv) crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI); if (IS_I830(dev_priv)) - crt->base.crtc_mask = BIT(PIPE_A); + crt->base.pipe_mask = BIT(PIPE_A); else - crt->base.crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + crt->base.pipe_mask = ~0; if (IS_GEN(dev_priv, 2)) connector->interlace_allowed = 0; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 9ba794cb9b4f..c9ba7d7f3787 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -593,7 +593,7 @@ struct tgl_dkl_phy_ddi_buf_trans { u32 dkl_de_emphasis_control; }; -static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = { +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */ @@ -607,6 +607,20 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = { { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ }; +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = { + /* HDMI Preset VS Pre-emph */ + { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */ + { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */ + { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */ + { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */ + { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */ + { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */ + { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */ + { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */ + { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */ + { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */ +}; + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { @@ -888,17 +902,16 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) { + struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port]; int n_entries, level, default_entry; enum phy phy = intel_port_to_phy(dev_priv, port); - level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) 
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else - n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); default_entry = n_entries - 1; } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) @@ -927,12 +940,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por return 0; } - /* Choose a good default if VBT is badly populated */ - if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries) - level = default_entry; - if (WARN_ON_ONCE(n_entries == 0)) return 0; + + if (port_info->hdmi_level_shift_set) + level = port_info->hdmi_level_shift; + else + level = default_entry; + if (WARN_ON_ONCE(level >= n_entries)) level = n_entries - 1; @@ -1092,18 +1107,14 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, * DDI A (which is used for eDP) */ -void hsw_fdi_link_train(struct intel_crtc *crtc, +void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_encoder *encoder; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 temp, i, rx_ctl_val, ddi_pll_sel; - for_each_encoder_on_crtc(dev, &crtc->base, encoder) { - WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG); - intel_prepare_dp_ddi_buffers(encoder, crtc_state); - } + intel_prepare_dp_ddi_buffers(encoder, crtc_state); /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the * mode set "sequence for CRT port" document: @@ -1528,7 +1539,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; - pipe_config->base.adjusted_mode.crtc_clock = dotclock; + pipe_config->hw.adjusted_mode.crtc_clock = dotclock; } static void icl_ddi_clock_get(struct intel_encoder *encoder, @@ -1744,7 +1755,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 temp; @@ -1794,31 +1805,13 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, * of Color Encoding Format and Content Color Gamut] while sending * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format. 
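
The comment above (DP 1.4a, Table 2-120) comes down to one bit: when the Pixel Encoding/Colorimetry is carried in a VSC SDP, MSA MISC1 merely flags that fact, and the hunk below can now rely on intel_dp_needs_vsc_sdp() unconditionally since MST passes in a conn_state too. A toy builder for that flag; the bit position here is illustrative only, not the layout of TRANS_MSA_MISC or the real DP_MSA_MISC_COLOR_VSC_SDP definition:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSA_MISC1_COLOR_VSC_SDP (1u << 14)  /* illustrative position */

    uint32_t build_msa_misc(uint32_t misc, bool needs_vsc_sdp)
    {
        /* Sink takes colorimetry from the VSC SDP (e.g. YCbCr 4:2:0,
         * BT.2020 HDR) instead of the MISC fields themselves. */
        if (needs_vsc_sdp)
            misc |= MSA_MISC1_COLOR_VSC_SDP;
        return misc;
    }
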
- * - * FIXME MST doesn't pass in the conn_state */ - if (conn_state && intel_dp_needs_vsc_sdp(crtc_state, conn_state)) + if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) temp |= DP_MSA_MISC_COLOR_VSC_SDP; I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); } -void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, - bool state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 temp; - - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if (state == true) - temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - else - temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); -} - /* * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. * @@ -1828,7 +1821,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, static u32 intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -1860,9 +1853,9 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) BUG(); } - if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC) + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DDI_PVSYNC; - if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC) + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DDI_PHSYNC; if (cpu_transcoder == TRANSCODER_EDP) { @@ -1905,6 +1898,9 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); + + if (INTEL_GEN(dev_priv) >= 12) + temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder); } else { temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); @@ -1915,12 +1911,14 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 temp; temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) + temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } @@ -1931,7 +1929,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) static void intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 temp; @@ -1943,7 +1941,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) void intel_ddi_disable_transcoder_func(const struct intel_crtc_state 
*crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); @@ -2234,14 +2232,14 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, /* * VDSC power is needed when DSC is enabled */ - if (crtc_state->dsc_params.compression_enable) + if (crtc_state->dsc.compression_enable) intel_display_power_get(dev_priv, intel_dsc_power_domain(crtc_state)); } void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); enum port port = encoder->port; @@ -2259,7 +2257,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (cpu_transcoder != TRANSCODER_EDP) { @@ -2370,7 +2368,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else - n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, encoder->type, @@ -2822,8 +2820,13 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; - n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); - ddi_translations = tgl_dkl_phy_ddi_translations; + if (encoder->type == INTEL_OUTPUT_HDMI) { + n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); + ddi_translations = tgl_dkl_phy_hdmi_ddi_trans; + } else { + n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); + ddi_translations = tgl_dkl_phy_dp_ddi_trans; + } if (level >= n_entries) level = n_entries - 1; @@ -2838,6 +2841,8 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, for (ln = 0; ln < 2; ln++) { I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0); + /* All the registers are RMW */ val = I915_READ(DKL_TX_DPCNTL0(tc_port)); val &= ~dpcnt_mask; @@ -2994,11 +2999,38 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpll_lock); } +static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, + u32 port_mask, bool ddi_clk_needed) +{ + enum port port; + u32 val; + + val = I915_READ(ICL_DPCLKA_CFGCR0); + for_each_port_masked(port, port_mask) { + enum phy phy = intel_port_to_phy(dev_priv, port); + bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv, + phy); + + if (ddi_clk_needed == !ddi_clk_off) + continue; + + /* + * Punt on the case now where clock is gated, but it would + * be needed by the port. Something else is really broken then. 
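
icl_sanitize_port_clk_off(), being introduced here, is a pure read-modify-write pass over a port mask: gate any DDI clock that is ungated but unneeded, and only warn on the opposite case (gated but needed), since that cannot be repaired at this point. The same decision logic as a compilable sketch, with a callback standing in for icl_dpclka_cfgcr0_clk_off():

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    uint32_t sanitize_clk_off(uint32_t cfgcr0, uint32_t port_mask,
                              bool clk_needed,
                              uint32_t (*clk_off_bit)(unsigned int port))
    {
        for (unsigned int port = 0; port < 32; port++) {
            if (!(port_mask & (1u << port)))
                continue;

            bool clk_off = cfgcr0 & clk_off_bit(port);

            if (clk_needed == !clk_off)
                continue;  /* state already consistent */

            if (clk_needed) {  /* gated but needed: cannot fix here */
                fprintf(stderr, "port %u: clock gated but needed\n", port);
                continue;
            }

            cfgcr0 |= clk_off_bit(port);  /* ungated but unused: gate it */
        }
        return cfgcr0;
    }
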
+ */ + if (WARN_ON(ddi_clk_needed)) + continue; + + DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", + phy_name(phy)); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); + } +} + void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; - enum port port; u32 port_mask; bool ddi_clk_needed; @@ -3047,29 +3079,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) ddi_clk_needed = false; } - val = I915_READ(ICL_DPCLKA_CFGCR0); - for_each_port_masked(port, port_mask) { - enum phy phy = intel_port_to_phy(dev_priv, port); - - bool ddi_clk_ungated = !(val & - icl_dpclka_cfgcr0_clk_off(dev_priv, - phy)); - - if (ddi_clk_needed == ddi_clk_ungated) - continue; - - /* - * Punt on the case now where clock is gated, but it would - * be needed by the port. Something else is really broken then. - */ - if (WARN_ON(ddi_clk_needed)) - continue; - - DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", - phy_name(port)); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, val); - } + icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed); } static void intel_ddi_clk_select(struct intel_encoder *encoder, @@ -3337,7 +3347,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, static void tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev); u32 val; if (!cstate->dc3co_exitline) @@ -3352,7 +3362,7 @@ static void tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate) { u32 val, exit_scanlines; - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev); if (!cstate->dc3co_exitline) return; @@ -3370,8 +3380,8 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *cstate) { u32 exit_scanlines; - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay; + struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev); + u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay; cstate->dc3co_exitline = 0; @@ -3379,11 +3389,11 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder, return; /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/ - if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A || + if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A || encoder->port != PORT_A) return; - if (!cstate->has_psr2 || !cstate->base.active) + if (!cstate->has_psr2 || !cstate->hw.active) return; /* @@ -3391,7 +3401,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder, * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 */ exit_scanlines = - intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1; + intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1; if (WARN_ON(exit_scanlines > crtc_vdisplay)) return; @@ -3403,7 +3413,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder, static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state) { u32 val; - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private 
*dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (INTEL_GEN(dev_priv) < 12) return; @@ -3433,47 +3443,86 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder); intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder); - /* 1.a got on intel_atomic_commit_tail() */ + /* + * 1. Enable Power Wells + * + * This was handled at the beginning of intel_atomic_commit_tail(), + * before we called down into this function. + */ - /* 2. */ + /* 2. Enable Panel Power if PPS is required */ intel_edp_panel_on(intel_dp); /* - * 1.b, 3. and 4.a is done before tgl_ddi_pre_enable_dp() by: - * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and - * haswell_crtc_enable()->intel_enable_shared_dpll() + * 3. For non-TBT Type-C ports, set FIA lane count + * (DFLEXDPSP.DPX4TXLATC) + * + * This was done before tgl_ddi_pre_enable_dp by + * haswell_crtc_enable()->intel_encoders_pre_pll_enable(). */ - /* 4.b */ + /* + * 4. Enable the port PLL. + * + * The PLL enabling itself was already done before this function by + * haswell_crtc_enable()->intel_enable_shared_dpll(). We need only + * configure the PLL to port mapping here. + */ intel_ddi_clk_select(encoder, crtc_state); - /* 5. */ + /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ if (!intel_phy_is_tc(dev_priv, phy) || dig_port->tc_mode != TC_PORT_TBT_ALT) intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - /* 6. */ + /* 6. Program DP_MODE */ icl_program_mg_dp_mode(dig_port, crtc_state); /* - * 7.a - Steps in this function should only be executed over MST - * master, what will be taken in care by MST hook - * intel_mst_pre_enable_dp() + * 7. The rest of the below are substeps under the bspec's "Enable and + * Train Display Port" step. Note that steps that are specific to + * MST will be handled by intel_mst_pre_enable_dp() before/after it + * calls into this function. Also intel_mst_pre_enable_dp() only calls + * us when active_mst_links==0, so any steps designated for "single + * stream or multi-stream master transcoder" can just be performed + * unconditionally here. + */ + + /* + * 7.a Configure Transcoder Clock Select to direct the Port clock to the + * Transcoder. */ intel_ddi_enable_pipe_clock(crtc_state); - /* 7.b */ + /* + * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST + * Transport Select + */ intel_ddi_config_transcoder_func(crtc_state); - /* 7.d */ + /* + * 7.c Configure & enable DP_TP_CTL with link training pattern 1 + * selected + * + * This will be handled by the intel_dp_start_link_train() farther + * down this function. + */ + + /* + * 7.d Type C with DP alternate or fixed/legacy/static connection - + * Disable PHY clock gating per Type-C DDI Buffer page + */ icl_phy_set_clock_gating(dig_port, false); - /* 7.e */ + /* 7.e Configure voltage swing and related IO settings */ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); - /* 7.f */ + /* + * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up + * the used lanes of the DDI. + */ if (intel_phy_is_combo(dev_priv, phy)) { bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; @@ -3483,7 +3532,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, lane_reversal); } - /* 7.g */ + /* + * 7.g Configure and enable DDI_BUF_CTL + * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout + * after 500 us. + * + * We only configure what the register value will be here. 
Actual + * enabling happens during link training farther down. + */ intel_ddi_init_dp_buf_reg(encoder); if (!is_mst) @@ -3496,10 +3552,17 @@ * training */ intel_dp_sink_set_fec_ready(intel_dp, crtc_state); - /* 7.c, 7.h, 7.i, 7.j */ + + /* + * 7.i Follow DisplayPort specification training sequence (see notes for + * failure handling) + * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle + * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) + * (timeout after 800 us) + */ intel_dp_start_link_train(intel_dp); - /* 7.k */ + /* 7.k Set DP_TP_CTL link training to Normal */ if (!is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp); @@ -3512,7 +3575,7 @@ * so not enabling it for now. */ - /* 7.l */ + /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_enable(encoder, crtc_state); } @@ -3600,7 +3663,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, else hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state); - intel_ddi_set_dp_msa(crtc_state, conn_state); + /* For MST, the MSA is set from the MST encoder's pre_enable callback, + * after the Virtual Channel has been allocated. + */ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) + intel_ddi_set_dp_msa(crtc_state, conn_state); } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -3651,7 +3718,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -3735,17 +3802,25 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, INTEL_OUTPUT_DP_MST); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (!is_mst) { + /* + * Power down sink before disabling the port, otherwise we end + * up getting interrupts from the sink on detecting link loss. + */ + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + + if (INTEL_GEN(dev_priv) < 12 && !is_mst) intel_ddi_disable_pipe_clock(old_crtc_state); - /* - * Power down sink before disabling the port, otherwise we end - * up getting interrupts from the sink on detecting link loss.
- */ - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); - } intel_disable_ddi_buf(encoder, old_crtc_state); + /* + * From TGL spec: "If single stream or multi-stream master transcoder: + * Configure Transcoder Clock select to direct no clock to the + * transcoder" + */ + if (INTEL_GEN(dev_priv) >= 12) + intel_ddi_disable_pipe_clock(old_crtc_state); + intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); @@ -3781,11 +3856,49 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } +static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg; + u32 trans_ddi_func_ctl2_val; + + if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) + return; + + DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", + transcoder_name(old_crtc_state->cpu_transcoder)); + + reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); + trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | + PORT_SYNC_MODE_MASTER_SELECT_MASK); + I915_WRITE(reg, trans_ddi_func_ctl2_val); +} + static void intel_ddi_post_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + bool is_tc_port = intel_phy_is_tc(dev_priv, phy); + + intel_crtc_vblank_off(old_crtc_state); + + intel_disable_pipe(old_crtc_state); + + if (INTEL_GEN(dev_priv) >= 11) + icl_disable_transcoder_port_sync(old_crtc_state); + + intel_ddi_disable_transcoder_func(old_crtc_state); + + intel_dsc_disable(old_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9) + skylake_scaler_disable(old_crtc_state); + else + ironlake_pfit_disable(old_crtc_state); /* * When called from DP MST code: @@ -3809,6 +3922,13 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, if (INTEL_GEN(dev_priv) >= 11) icl_unmap_plls_to_ports(encoder); + + if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port) + intel_display_power_put_unchecked(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); + + if (is_tc_port) + intel_tc_port_put_link(dig_port); } void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, @@ -3870,12 +3990,12 @@ static i915_reg_t gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, enum port port) { - static const i915_reg_t regs[] = { - [PORT_A] = CHICKEN_TRANS_EDP, - [PORT_B] = CHICKEN_TRANS_A, - [PORT_C] = CHICKEN_TRANS_B, - [PORT_D] = CHICKEN_TRANS_C, - [PORT_E] = CHICKEN_TRANS_A, + static const enum transcoder trans[] = { + [PORT_A] = TRANSCODER_EDP, + [PORT_B] = TRANSCODER_A, + [PORT_C] = TRANSCODER_B, + [PORT_D] = TRANSCODER_C, + [PORT_E] = TRANSCODER_A, }; WARN_ON(INTEL_GEN(dev_priv) < 9); @@ -3883,7 +4003,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, if (WARN_ON(port < PORT_A || port > PORT_E)) port = PORT_A; - return regs[port]; + return CHICKEN_TRANS(trans[port]); } static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, @@ -3960,6 +4080,7 @@ static void intel_enable_ddi(struct intel_encoder *encoder, if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) intel_hdcp_enable(to_intel_connector(conn_state->connector), + 
crtc_state->cpu_transcoder, (u8)conn_state->hdcp_content_type); } @@ -4063,7 +4184,9 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED || content_protection_type_changed) - intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type); + intel_hdcp_enable(connector, + crtc_state->cpu_transcoder, + (u8)conn_state->hdcp_content_type); } static void @@ -4078,7 +4201,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, WARN_ON(crtc && crtc->active); intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes); - if (crtc_state && crtc_state->base.active) + if (crtc_state && crtc_state->hw.active) intel_update_active_dpll(state, crtc, encoder); } @@ -4118,61 +4241,44 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, crtc_state->lane_lat_optim_mask); } -static void -intel_ddi_post_pll_disable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - bool is_tc_port = intel_phy_is_tc(dev_priv, phy); - - if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) - intel_display_power_put_unchecked(dev_priv, - intel_ddi_main_link_aux_domain(dig_port)); - - if (is_tc_port) - intel_tc_port_put_link(dig_port); -} - static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; - u32 val; + u32 dp_tp_ctl, ddi_buf_ctl; bool wait = false; - if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) { - val = I915_READ(DDI_BUF_CTL(port)); - if (val & DDI_BUF_CTL_ENABLE) { - val &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), val); + dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl); + + if (dp_tp_ctl & DP_TP_CTL_ENABLE) { + ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port)); + if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) { + I915_WRITE(DDI_BUF_CTL(port), + ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE); wait = true; } - val = I915_READ(intel_dp->regs.dp_tp_ctl); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl); POSTING_READ(intel_dp->regs.dp_tp_ctl); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); } - val = DP_TP_CTL_ENABLE | - DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; + dp_tp_ctl = DP_TP_CTL_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; if (intel_dp->link_mst) - val |= DP_TP_CTL_MODE_MST; + dp_tp_ctl |= DP_TP_CTL_MODE_MST; else { - val |= DP_TP_CTL_MODE_SST; + dp_tp_ctl |= DP_TP_CTL_MODE_SST; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } - I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl); POSTING_READ(intel_dp->regs.dp_tp_ctl); intel_dp->DP |= DDI_BUF_CTL_ENABLE; @@ -4208,7 +4314,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 temp, flags = 0; @@ -4216,6 +4322,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, if (WARN_ON(transcoder_is_dsi(cpu_transcoder))) return; + intel_dsc_get_config(encoder, pipe_config); + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; @@ -4226,7 +4334,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, else flags |= DRM_MODE_FLAG_NVSYNC; - pipe_config->base.adjusted_mode.flags |= flags; + pipe_config->hw.adjusted_mode.flags |= flags; switch (temp & TRANS_DDI_BPC_MASK) { case TRANS_DDI_BPC_6: @@ -4375,7 +4483,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int ret; @@ -4509,7 +4617,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); - if (!crtc_state->base.active) + if (!crtc_state->hw.active) return 0; if (!crtc_state->hdmi_high_tmds_clock_ratio && @@ -4680,10 +4788,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port]; struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; + struct intel_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; - enum pipe pipe; enum phy phy = intel_port_to_phy(dev_priv, port); init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; @@ -4711,32 +4817,30 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) if (!intel_dig_port) return; - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; + encoder = &intel_dig_port->base; - drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs, + drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); - intel_encoder->hotplug = intel_ddi_hotplug; - intel_encoder->compute_output_type = intel_ddi_compute_output_type; - intel_encoder->compute_config = intel_ddi_compute_config; - intel_encoder->enable = intel_enable_ddi; - intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable; - intel_encoder->post_pll_disable = intel_ddi_post_pll_disable; - intel_encoder->pre_enable = intel_ddi_pre_enable; - intel_encoder->disable = intel_disable_ddi; - intel_encoder->post_disable = intel_ddi_post_disable; - intel_encoder->update_pipe = intel_ddi_update_pipe; - intel_encoder->get_hw_state = intel_ddi_get_hw_state; - intel_encoder->get_config = intel_ddi_get_config; - intel_encoder->suspend = intel_dp_encoder_suspend; - intel_encoder->get_power_domains = intel_ddi_get_power_domains; - intel_encoder->type = INTEL_OUTPUT_DDI; - intel_encoder->power_domain = intel_port_to_power_domain(port); - intel_encoder->port = port; - intel_encoder->cloneable = 0; - for_each_pipe(dev_priv, pipe) - intel_encoder->crtc_mask |= BIT(pipe); + encoder->hotplug = intel_ddi_hotplug; + encoder->compute_output_type = intel_ddi_compute_output_type; + 
encoder->compute_config = intel_ddi_compute_config; + encoder->enable = intel_enable_ddi; + encoder->pre_pll_enable = intel_ddi_pre_pll_enable; + encoder->pre_enable = intel_ddi_pre_enable; + encoder->disable = intel_disable_ddi; + encoder->post_disable = intel_ddi_post_disable; + encoder->update_pipe = intel_ddi_update_pipe; + encoder->get_hw_state = intel_ddi_get_hw_state; + encoder->get_config = intel_ddi_get_config; + encoder->suspend = intel_dp_encoder_suspend; + encoder->get_power_domains = intel_ddi_get_power_domains; + + encoder->type = INTEL_OUTPUT_DDI; + encoder->power_domain = intel_port_to_power_domain(port); + encoder->port = port; + encoder->cloneable = 0; + encoder->pipe_mask = ~0; if (INTEL_GEN(dev_priv) >= 11) intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & @@ -4744,6 +4848,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) else intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); + intel_dig_port->dp.output_reg = INVALID_MMIO_REG; intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); @@ -4754,8 +4859,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_tc_port_init(intel_dig_port, is_legacy); - intel_encoder->update_prepare = intel_ddi_update_prepare; - intel_encoder->update_complete = intel_ddi_update_complete; + encoder->update_prepare = intel_ddi_update_prepare; + encoder->update_complete = intel_ddi_update_complete; } WARN_ON(port > PORT_I); @@ -4771,7 +4876,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... 
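
The crtc_mask to pipe_mask rename above (also visible in intel_crt_init() further up) drops the per-pipe BIT() loop: an encoder usable on every pipe now just stores ~0. In isolation, assuming a simplified encoder type:

    #include <stdint.h>
    #include <stdbool.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

    struct encoder { uint32_t pipe_mask; };

    bool encoder_has_pipe(const struct encoder *e, enum pipe p)
    {
        return e->pipe_mask & (1u << p);
    }

    void init_pipe_masks(struct encoder *ddi, struct encoder *i830_crt)
    {
        ddi->pipe_mask = ~0u;                /* any pipe the platform has */
        i830_crt->pipe_mask = 1u << PIPE_A;  /* pipe A only */
    }
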
*/ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { if (!intel_ddi_init_hdmi_connector(intel_dig_port)) goto err; } @@ -4795,6 +4900,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) return; err: - drm_encoder_cleanup(encoder); + drm_encoder_cleanup(&encoder->base); kfree(intel_dig_port); } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 19aeab1246ee..167c6579d972 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -22,7 +22,7 @@ struct intel_encoder; void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); -void hsw_fdi_link_train(struct intel_crtc *crtc, +void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ee9e74a51780..1860da0a493e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -55,6 +55,8 @@ #include "display/intel_tv.h" #include "display/intel_vdsc.h" +#include "gt/intel_rps.h" + #include "i915_drv.h" #include "i915_trace.h" #include "intel_acpi.h" @@ -64,6 +66,7 @@ #include "intel_cdclk.h" #include "intel_color.h" #include "intel_display_types.h" +#include "intel_dp_link_training.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fifo_underrun.h" @@ -83,12 +86,22 @@ /* Primary plane formats for gen <= 3 */ static const u32 i8xx_primary_formats[] = { DRM_FORMAT_C8, - DRM_FORMAT_RGB565, DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, }; -/* Primary plane formats for gen >= 4 */ +/* Primary plane formats for ivb (no fp16 due to hw issue) */ +static const u32 ivb_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, +}; + +/* Primary plane formats for gen >= 4, except ivb */ static const u32 i965_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, @@ -96,6 +109,22 @@ static const u32 i965_primary_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XBGR16161616F, +}; + +/* Primary plane formats for vlv/chv */ +static const u32 vlv_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XBGR16161616F, }; static const u64 i9xx_format_modifiers[] = { @@ -135,14 +164,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); -static void intel_crtc_init_scalers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state); static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); -static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state); static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state); static void 
intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); +static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); struct intel_limit { struct { @@ -516,7 +542,7 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, static bool needs_modeset(const struct intel_crtc_state *state) { - return drm_atomic_crtc_needs_modeset(&state->base); + return drm_atomic_crtc_needs_modeset(&state->uapi); } bool @@ -533,6 +559,12 @@ is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) crtc_state->sync_mode_slaves_mask); } +static bool +is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->master_transcoder != INVALID_TRANSCODER; +} + /* * Platform specific helpers to calculate the port PLL loopback- (clock.m), * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast @@ -644,7 +676,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* @@ -680,7 +712,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_device *dev = crtc_state->uapi.crtc->dev; struct dpll clock; int err = target; @@ -738,7 +770,7 @@ pnv_find_best_dpll(const struct intel_limit *limit, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_device *dev = crtc_state->uapi.crtc->dev; struct dpll clock; int err = target; @@ -794,7 +826,7 @@ g4x_find_best_dpll(const struct intel_limit *limit, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_device *dev = crtc_state->uapi.crtc->dev; struct dpll clock; int max_n; bool found = false; @@ -888,7 +920,7 @@ vlv_find_best_dpll(const struct intel_limit *limit, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; struct dpll clock; unsigned int bestppm = 1000000; @@ -948,7 +980,7 @@ chv_find_best_dpll(const struct intel_limit *limit, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; struct dpll clock; @@ -1011,33 +1043,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, NULL, best_clock); } -bool intel_crtc_active(struct intel_crtc *crtc) -{ - /* Be paranoid as we can arrive here with only partial - * state retrieved from the hardware during setup. - * - * We can ditch the adjusted_mode.crtc_clock check as soon - * as Haswell has gained clock readout/fastboot support. - * - * We can ditch the crtc->primary->state->fb check as soon as we can - * properly reconstruct framebuffers. 
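
The is_trans_port_sync_master()/is_trans_port_sync_slave() pair added earlier in this hunk classifies CRTCs by the new master_transcoder field: a slave points at a valid master transcoder, a master has slaves in its mask and no master of its own. Modeled standalone:

    #include <stdbool.h>
    #include <stdint.h>

    #define INVALID_TRANSCODER (-1)

    struct sync_state {
        int master_transcoder;          /* INVALID_TRANSCODER unless a slave */
        uint32_t sync_mode_slaves_mask; /* one bit per slave transcoder */
    };

    bool is_sync_master(const struct sync_state *s)
    {
        return s->master_transcoder == INVALID_TRANSCODER &&
               s->sync_mode_slaves_mask != 0;
    }

    bool is_sync_slave(const struct sync_state *s)
    {
        return s->master_transcoder != INVALID_TRANSCODER;
    }
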
- * - * FIXME: The intel_crtc->active here should be switched to - * crtc->state->active once we have proper CRTC states wired up - * for atomic. - */ - return crtc->active && crtc->base.primary->state->fb && - crtc->config->base.adjusted_mode.crtc_clock; -} - -enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - - return crtc->config->cpu_transcoder; -} - static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -1081,7 +1086,7 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) static void intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (INTEL_GEN(dev_priv) >= 4) { @@ -1131,11 +1136,15 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { bool cur_state; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (HAS_DDI(dev_priv)) { - /* DDI does not have a specific FDI_TX register */ + /* + * DDI does not have a specific FDI_TX register. + * + * FDI is never fed from EDP transcoder + * so pipe->transcoder cast is fine here. + */ + enum transcoder cpu_transcoder = (enum transcoder)pipe; u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); } else { @@ -1252,11 +1261,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) } void assert_pipe(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) + enum transcoder cpu_transcoder, bool state) { bool cur_state; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; @@ -1276,8 +1283,9 @@ void assert_pipe(struct drm_i915_private *dev_priv, } I915_STATE_WARN(cur_state != state, - "pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), onoff(state), onoff(cur_state)); + "transcoder %s assertion failure (expected %s, current %s)\n", + transcoder_name(cpu_transcoder), + onoff(state), onoff(cur_state)); } static void assert_plane(struct intel_plane *plane, bool state) @@ -1404,7 +1412,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ assert_panel_unlocked(dev_priv, pipe); @@ -1453,7 +1461,7 @@ static void chv_enable_pll(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ assert_panel_unlocked(dev_priv, pipe); @@ -1500,7 +1508,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, u32 dpll = crtc_state->dpll_hw_state.dpll; int i; - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ if (i9xx_has_pps(dev_priv)) @@ -1540,7 +1548,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, static void 
i9xx_disable_pll(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -1549,7 +1557,7 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) return; /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); POSTING_READ(DPLL(pipe)); @@ -1560,7 +1568,7 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) u32 val; /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, (enum transcoder)pipe); val = DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -1577,7 +1585,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) u32 val; /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, (enum transcoder)pipe); val = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -1631,7 +1639,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; @@ -1645,11 +1653,16 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s assert_fdi_rx_enabled(dev_priv, pipe); if (HAS_PCH_CPT(dev_priv)) { - /* Workaround: Set the timing override bit before enabling the - * pch transcoder. */ reg = TRANS_CHICKEN2(pipe); val = I915_READ(reg); + /* + * Workaround: Set the timing override bit + * before enabling the pch transcoder. + */ val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); I915_WRITE(reg, val); } @@ -1658,6 +1671,10 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s pipeconf_val = I915_READ(PIPECONF(pipe)); if (HAS_PCH_IBX(dev_priv)) { + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_FRAME_START_DELAY_MASK; + val |= TRANS_FRAME_START_DELAY(0); + /* * Make the BPC in transcoder be consistent with * that in pipeconf reg. For HDMI we must use 8bpc @@ -1695,9 +1712,12 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); assert_fdi_rx_enabled(dev_priv, PIPE_A); - /* Workaround: set timing override bit. */ val = I915_READ(TRANS_CHICKEN2(PIPE_A)); + /* Workaround: set timing override bit. 
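The frame start delay programming these hunks add is a plain read-modify-write of a register field: clear the field's mask, then OR in the new value. A minimal standalone illustration of that pattern (the field's bit position here is made up for the example; it is not the real TRANS_CHICKEN2 layout):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field at bits 27:26, mirroring the mask-then-set
 * pattern used for FRAME_START_DELAY in the hunks above. */
#define FRAME_START_DELAY_MASK  (0x3u << 26)
#define FRAME_START_DELAY(x)    ((uint32_t)(x) << 26)

static uint32_t set_frame_start_delay(uint32_t val, unsigned delay)
{
    val &= ~FRAME_START_DELAY_MASK;  /* clear the old field */
    val |= FRAME_START_DELAY(delay); /* program the new value */
    return val;
}

int main(void)
{
    uint32_t reg = 0xdeadbeef;
    printf("%08x -> %08x\n", reg, set_frame_start_delay(reg, 0));
    return 0;
}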
*/ val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); val = TRANS_ENABLE; @@ -1775,7 +1795,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); /* * On i965gm the hardware frame counter reads @@ -1795,16 +1815,25 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + assert_vblank_disabled(&crtc->base); drm_crtc_set_max_vblank_count(&crtc->base, intel_crtc_max_vblank_count(crtc_state)); drm_crtc_vblank_on(&crtc->base); } +void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + drm_crtc_vblank_off(&crtc->base); + assert_vblank_disabled(&crtc->base); +} + static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; @@ -1860,9 +1889,9 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) intel_wait_for_pipe_scanline_moving(crtc); } -static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) +void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; @@ -1905,6 +1934,66 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) return IS_GEN(dev_priv, 2) ? 
2048 : 4096; } +static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) +{ + if (!is_ccs_modifier(fb->modifier)) + return false; + + return plane >= fb->format->num_planes / 2; +} + +static bool is_gen12_ccs_modifier(u64 modifier) +{ + return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS; +} + +static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) +{ + return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); +} + +static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) +{ + if (is_ccs_modifier(fb->modifier)) + return is_ccs_plane(fb, plane); + + return plane == 1; +} + +static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) +{ + WARN_ON(!is_ccs_modifier(fb->modifier) || + (main_plane && main_plane >= fb->format->num_planes / 2)); + + return fb->format->num_planes / 2 + main_plane; +} + +static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) +{ + WARN_ON(!is_ccs_modifier(fb->modifier) || + ccs_plane < fb->format->num_planes / 2); + + return ccs_plane - fb->format->num_planes / 2; +} + +/* Return either the main plane's CCS or - if not a CCS FB - UV plane */ +static int +intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) +{ + if (is_ccs_modifier(fb->modifier)) + return main_to_ccs_plane(fb, main_plane); + + return 1; +} + +bool +intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + uint64_t modifier) +{ + return info->is_yuv && + info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2); +} + static unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { @@ -1920,16 +2009,20 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) else return 512; case I915_FORMAT_MOD_Y_TILED_CCS: - if (color_plane == 1) + if (is_ccs_plane(fb, color_plane)) return 128; /* fall through */ + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + if (is_ccs_plane(fb, color_plane)) + return 64; + /* fall through */ case I915_FORMAT_MOD_Y_TILED: if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) return 128; else return 512; case I915_FORMAT_MOD_Yf_TILED_CCS: - if (color_plane == 1) + if (is_ccs_plane(fb, color_plane)) return 128; /* fall through */ case I915_FORMAT_MOD_Yf_TILED: @@ -1956,6 +2049,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) static unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane) { + if (is_gen12_ccs_plane(fb, color_plane)) + return 1; + return intel_tile_size(to_i915(fb->dev)) / intel_tile_width_bytes(fb, color_plane); } @@ -1969,7 +2065,7 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, unsigned int cpp = fb->format->cpp[color_plane]; *tile_width = tile_width_bytes / cpp; - *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; + *tile_height = intel_tile_height(fb, color_plane); } unsigned int @@ -2046,7 +2142,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = to_i915(fb->dev); /* AUX_DIST needs only 4K alignment */ - if (color_plane == 1) + if (is_aux_plane(fb, color_plane)) return 4096; switch (fb->modifier) { @@ -2056,6 +2152,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, if (INTEL_GEN(dev_priv) >= 9) return 256 * 1024; return 0; + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: case 
I915_FORMAT_MOD_Y_TILED: @@ -2069,7 +2167,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); return INTEL_GEN(dev_priv) < 4 || @@ -2112,19 +2210,18 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * pin/unpin/fence and not more. */ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - i915_gem_object_lock(obj); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); - pinctl = 0; - - /* Valleyview is definitely limited to scanning out the first + /* + * Valleyview is definitely limited to scanning out the first * 512MiB. Lets presume this behaviour was inherited from the * g4x display engine and that all earlier gen are similarly * limited. Testing suggests that it is a little more * complicated than this. For example, Cherryview appears quite * happy to scanout from anywhere within its global aperture. */ + pinctl = 0; if (HAS_GMCH(dev_priv)) pinctl |= PIN_MAPPABLE; @@ -2136,7 +2233,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { int ret; - /* Install a fence for tiled scan-out. Pre-i965 always needs a + /* + * Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using * framebuffer compression. For simplicity, we always, when * possible, install a fence as the cost is not that onerous. @@ -2166,8 +2264,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, i915_vma_get(vma); err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); - - i915_gem_object_unlock(obj); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return vma; } @@ -2202,7 +2298,7 @@ u32 intel_fb_xy_to_linear(int x, int y, const struct intel_plane_state *state, int color_plane) { - const struct drm_framebuffer *fb = state->base.fb; + const struct drm_framebuffer *fb = state->hw.fb; unsigned int cpp = fb->format->cpp[color_plane]; unsigned int pitch = state->color_plane[color_plane].stride; @@ -2250,9 +2346,10 @@ static u32 intel_adjust_tile_offset(int *x, int *y, return new_offset; } -static bool is_surface_linear(u64 modifier, int color_plane) +static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) { - return modifier == DRM_FORMAT_MOD_LINEAR; + return fb->modifier == DRM_FORMAT_MOD_LINEAR || + is_gen12_ccs_plane(fb, color_plane); } static u32 intel_adjust_aligned_offset(int *x, int *y, @@ -2267,7 +2364,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, WARN_ON(new_offset > old_offset); - if (!is_surface_linear(fb->modifier, color_plane)) { + if (!is_surface_linear(fb, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int pitch_tiles; @@ -2303,8 +2400,8 @@ static u32 intel_plane_adjust_aligned_offset(int *x, int *y, int color_plane, u32 old_offset, u32 new_offset) { - return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane, - state->base.rotation, + return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, + state->hw.rotation, state->color_plane[color_plane].stride, old_offset, new_offset); } @@ -2337,7 +2434,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, if (alignment) alignment--; - if (!is_surface_linear(fb->modifier, color_plane)) { + if (!is_surface_linear(fb, 
color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles, pitch_tiles; @@ -2378,10 +2475,10 @@ static u32 intel_plane_compute_aligned_offset(int *x, int *y, const struct intel_plane_state *state, int color_plane) { - struct intel_plane *intel_plane = to_intel_plane(state->base.plane); + struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); - const struct drm_framebuffer *fb = state->base.fb; - unsigned int rotation = state->base.rotation; + const struct drm_framebuffer *fb = state->hw.fb; + unsigned int rotation = state->hw.rotation; int pitch = state->color_plane[color_plane].stride; u32 alignment; @@ -2439,6 +2536,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) return I915_TILING_X; case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: return I915_TILING_Y; default: return I915_TILING_NONE; @@ -2459,7 +2557,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) * us a ratio of one byte in the CCS for each 8x16 pixels in the * main surface. */ -static const struct drm_format_info ccs_formats[] = { +static const struct drm_format_info skl_ccs_formats[] = { { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, @@ -2470,6 +2568,28 @@ static const struct drm_format_info ccs_formats[] = { .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, }; +/* + * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the + * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles + * in the main surface. With 4 byte pixels and each Y-tile having dimensions of + * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in + * the main surface. 
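A quick sanity check of that ratio: a Y-tile is 4 KiB laid out 128 bytes wide by 32 rows, so at 4 bytes per pixel it covers 32x32 pixels; a 64-byte CCS cache line covering 4x1 Y-tiles therefore maps one CCS byte to 16384/64 = 256 main-surface bytes, i.e. one 2x32-pixel column. Verified standalone:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    int tile_bytes = 4096, tile_w_bytes = 128, tile_rows = 32;
    int cpp = 4;
    int tile_w_px = tile_w_bytes / cpp;                /* 32 px wide */
    int main_per_ccs = 4 * tile_bytes / 64;            /* 4x1 tiles per 64B line */

    assert(tile_w_px == 32 && tile_rows == 32);
    assert(main_per_ccs == 256);
    /* 256 bytes at 4 Bpp == a 2-pixel-wide, 32-row column */
    assert(2 * tile_rows * cpp == main_per_ccs);
    printf("1 CCS byte <-> 2x32 px (%d:1 bytes)\n", main_per_ccs);
    return 0;
}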
+ */ +static const struct drm_format_info gen12_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, +}; + static const struct drm_format_info * lookup_format_info(const struct drm_format_info formats[], int num_formats, u32 format) @@ -2490,8 +2610,12 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) switch (cmd->modifier[0]) { case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: - return lookup_format_info(ccs_formats, - ARRAY_SIZE(ccs_formats), + return lookup_format_info(skl_ccs_formats, + ARRAY_SIZE(skl_ccs_formats), + cmd->pixel_format); + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + return lookup_format_info(gen12_ccs_formats, + ARRAY_SIZE(gen12_ccs_formats), cmd->pixel_format); default: return NULL; @@ -2500,10 +2624,17 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) bool is_ccs_modifier(u64 modifier) { - return modifier == I915_FORMAT_MOD_Y_TILED_CCS || + return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_CCS || modifier == I915_FORMAT_MOD_Yf_TILED_CCS; } +static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane) +{ + return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)], + 512) * 64; +} + u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier) { @@ -2515,6 +2646,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, * the highest stride limits of them all. */ crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + if (!crtc) + return 0; + plane = to_intel_plane(crtc->base.primary); return plane->max_stride(plane, pixel_format, modifier, @@ -2545,8 +2679,9 @@ static u32 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); + u32 tile_width; - if (fb->modifier == DRM_FORMAT_MOD_LINEAR) { + if (is_surface_linear(fb, color_plane)) { u32 max_stride = intel_plane_fb_max_stride(dev_priv, fb->format->format, fb->modifier); @@ -2555,20 +2690,41 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) * To make remapping with linear generally feasible * we need the stride to be page aligned. */ - if (fb->pitches[color_plane] > max_stride) + if (fb->pitches[color_plane] > max_stride && + !is_ccs_modifier(fb->modifier)) return intel_tile_size(dev_priv); else return 64; - } else { - return intel_tile_width_bytes(fb, color_plane); } + + tile_width = intel_tile_width_bytes(fb, color_plane); + if (is_ccs_modifier(fb->modifier) && color_plane == 0) { + /* + * Display WA #0531: skl,bxt,kbl,glk + * + * Render decompression and plane width > 3840 + * combined with horizontal panning requires the + * plane stride to be a multiple of 4. We'll just + * require the entire fb to accommodate that to avoid + * potential runtime errors at plane configuration time. 
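Expressed as plain arithmetic, the rule this workaround comment describes is: a CCS main surface's stride alignment is the tile width in bytes, widened to four tiles either on gen9 parts with plane width over 3840 (Display WA #0531) or unconditionally on gen12+. A standalone sketch (the 128-byte Y-tile width is an assumption for the example):

#include <stdio.h>

static unsigned ccs_main_stride_align(unsigned tile_width_bytes,
                                      int gen, unsigned fb_width)
{
    if (gen == 9 && fb_width > 3840)
        return tile_width_bytes * 4; /* Display WA #0531 */
    if (gen >= 12)
        return tile_width_bytes * 4; /* pitch padded to 4 tile widths */
    return tile_width_bytes;
}

int main(void)
{
    printf("gen9, 4096 wide: %u\n", ccs_main_stride_align(128, 9, 4096));
    printf("gen12, any:      %u\n", ccs_main_stride_align(128, 12, 1920));
    return 0;
}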
+ */ + if (IS_GEN(dev_priv, 9) && fb->width > 3840) + tile_width *= 4; + /* + * The main surface pitch must be padded to a multiple of four + * tile widths. + */ + else if (INTEL_GEN(dev_priv) >= 12) + tile_width *= 4; + } + return tile_width; } bool intel_plane_can_remap(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; int i; /* We don't want to deal with remapping with cursors */ @@ -2606,16 +2762,16 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state) static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; u32 stride, max_stride; /* * No remapping for invisible planes since we don't have * an actual source viewport to remap. */ - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return false; if (!intel_plane_can_remap(plane_state)) @@ -2632,12 +2788,167 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) return stride > max_stride; } +static void +intel_fb_plane_get_subsampling(int *hsub, int *vsub, + const struct drm_framebuffer *fb, + int color_plane) +{ + int main_plane; + + if (color_plane == 0) { + *hsub = 1; + *vsub = 1; + + return; + } + + /* + * TODO: Deduct the subsampling from the char block for all CCS + * formats and planes. + */ + if (!is_gen12_ccs_plane(fb, color_plane)) { + *hsub = fb->format->hsub; + *vsub = fb->format->vsub; + + return; + } + + main_plane = ccs_to_main_plane(fb, color_plane); + *hsub = drm_format_info_block_width(fb->format, color_plane) / + drm_format_info_block_width(fb->format, main_plane); + + /* + * The min stride check in the core framebuffer_check() function + * assumes that format->hsub applies to every plane except for the + * first plane. That's incorrect for the CCS AUX plane of the first + * plane, but for the above check to pass we must define the block + * width with that subsampling applied to it. Adjust the width here + * accordingly, so we can calculate the actual subsampling factor. 
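For the gen12 RC CCS layout this derivation works out to hsub = 2 and vsub = 32: the AUX plane's block width (a 2-pixel-wide, 1-byte block) divided by the main plane's (1 pixel per block), with the vertical factor fixed at 32 rows per CCS byte. A standalone sketch of the resulting AUX plane dimensions:

#include <stdio.h>

/* Mirrors the subsampling derivation above for a gen12 RC CCS
 * XRGB8888 fb: block_w = {1, 2}, vsub fixed at 32. */
static void ccs_aux_dims(unsigned fb_w, unsigned fb_h,
                         unsigned *aux_w, unsigned *aux_h)
{
    unsigned hsub = 2 / 1; /* aux block_w / main block_w */
    unsigned vsub = 32;    /* one CCS byte spans 32 rows */

    *aux_w = fb_w / hsub;
    *aux_h = fb_h / vsub;
}

int main(void)
{
    unsigned w, h;

    ccs_aux_dims(3840, 2160, &w, &h);
    printf("3840x2160 -> AUX %ux%u\n", w, h); /* prints 1920x67 */
    return 0;
}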
+ */ + if (main_plane == 0) + *hsub *= fb->format->hsub; + + *vsub = 32; +} +static int +intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + int main_plane; + int hsub, vsub; + int tile_width, tile_height; + int ccs_x, ccs_y; + int main_x, main_y; + + if (!is_ccs_plane(fb, ccs_plane)) + return 0; + + intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height); + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); + + tile_width *= hsub; + tile_height *= vsub; + + ccs_x = (x * hsub) % tile_width; + ccs_y = (y * vsub) % tile_height; + + main_plane = ccs_to_main_plane(fb, ccs_plane); + main_x = intel_fb->normal[main_plane].x % tile_width; + main_y = intel_fb->normal[main_plane].y % tile_height; + + /* + * CCS doesn't have its own x/y offset register, so the intra CCS tile + * x/y offsets must match between CCS and the main surface. + */ + if (main_x != ccs_x || main_y != ccs_y) { + DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", + main_x, main_y, + ccs_x, ccs_y, + intel_fb->normal[main_plane].x, + intel_fb->normal[main_plane].y, + x, y); + return -EINVAL; + } + + return 0; +} + +static void +intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) +{ + int hsub, vsub; + + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane); + *w = fb->width / hsub; + *h = fb->height / vsub; +} + +/* + * Setup the rotated view for an FB plane and return the size the GTT mapping + * requires for this view. + */ +static u32 +setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info, + u32 gtt_offset_rotated, int x, int y, + unsigned int width, unsigned int height, + unsigned int tile_size, + unsigned int tile_width, unsigned int tile_height, + struct drm_framebuffer *fb) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_rotation_info *rot_info = &intel_fb->rot_info; + unsigned int pitch_tiles; + struct drm_rect r; + + /* Y or Yf modifiers required for 90/270 rotation */ + if (fb->modifier != I915_FORMAT_MOD_Y_TILED && + fb->modifier != I915_FORMAT_MOD_Yf_TILED) + return 0; + + if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane))) + return 0; + + rot_info->plane[plane] = *plane_info; + + intel_fb->rotated[plane].pitch = plane_info->height * tile_height; + + /* rotate the x/y offsets to match the GTT view */ + drm_rect_init(&r, x, y, width, height); + drm_rect_rotate(&r, + plane_info->width * tile_width, + plane_info->height * tile_height, + DRM_MODE_ROTATE_270); + x = r.x1; + y = r.y1; + + /* rotate the tile dimensions to match the GTT view */ + pitch_tiles = intel_fb->rotated[plane].pitch / tile_height; + swap(tile_width, tile_height); + + /* + * We only keep the x/y offsets, so push all of the + * gtt offset into the x/y offsets. + */ + intel_adjust_tile_offset(&x, &y, + tile_width, tile_height, + tile_size, pitch_tiles, + gtt_offset_rotated * tile_size, 0); + + /* + * First pixel of the framebuffer from + * the start of the rotated gtt mapping. 
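The "push all of the gtt offset into the x/y offsets" step above converts a tile-aligned byte offset into tile-grid coordinates: the offset in tiles splits into a row (divide by the pitch in tiles) and a column (remainder). A standalone sketch of that conversion; this is a simplification of intel_adjust_tile_offset() and ignores its sub-tile remainder handling:

#include <stdio.h>

/* Fold a tile-aligned byte offset into pixel x/y. */
static void push_offset_into_xy(unsigned offset_bytes,
                                unsigned tile_size, unsigned pitch_tiles,
                                unsigned tile_w, unsigned tile_h,
                                unsigned *x, unsigned *y)
{
    unsigned tiles = offset_bytes / tile_size;

    *y += tiles / pitch_tiles * tile_h;
    *x += tiles % pitch_tiles * tile_w;
}

int main(void)
{
    unsigned x = 5, y = 7;

    /* 10 tiles into a 4-tile-wide pitch: 2 rows down, 2 tiles across */
    push_offset_into_xy(10 * 4096, 4096, 4, 32, 32, &x, &y);
    printf("x=%u y=%u\n", x, y); /* x=5+64, y=7+64 */
    return 0;
}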
+ */ + intel_fb->rotated[plane].x = x; + intel_fb->rotated[plane].y = y; + + return plane_info->width * plane_info->height; +} + static int intel_fill_fb_info(struct drm_i915_private *dev_priv, struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct intel_rotation_info *rot_info = &intel_fb->rot_info; struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 gtt_offset_rotated = 0; unsigned int max_size = 0; @@ -2652,8 +2963,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, int ret; cpp = fb->format->cpp[i]; - width = drm_framebuffer_plane_width(fb->width, fb, i); - height = drm_framebuffer_plane_height(fb->height, fb, i); + intel_fb_plane_dims(&width, &height, fb, i); ret = intel_fb_offset_to_xy(&x, &y, fb, i); if (ret) { @@ -2662,36 +2972,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, return ret; } - if (is_ccs_modifier(fb->modifier) && i == 1) { - int hsub = fb->format->hsub; - int vsub = fb->format->vsub; - int tile_width, tile_height; - int main_x, main_y; - int ccs_x, ccs_y; - - intel_tile_dims(fb, i, &tile_width, &tile_height); - tile_width *= hsub; - tile_height *= vsub; - - ccs_x = (x * hsub) % tile_width; - ccs_y = (y * vsub) % tile_height; - main_x = intel_fb->normal[0].x % tile_width; - main_y = intel_fb->normal[0].y % tile_height; - - /* - * CCS doesn't have its own x/y offset register, so the intra CCS tile - * x/y offsets must match between CCS and the main surface. - */ - if (main_x != ccs_x || main_y != ccs_y) { - DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", - main_x, main_y, - ccs_x, ccs_y, - intel_fb->normal[0].x, - intel_fb->normal[0].y, - x, y); - return -EINVAL; - } - } + ret = intel_fb_check_ccs_xy(fb, i, x, y); + if (ret) + return ret; /* * The fence (if used) is aligned to the start of the object @@ -2722,23 +3005,21 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, tile_size); offset /= tile_size; - if (!is_surface_linear(fb->modifier, i)) { + if (!is_surface_linear(fb, i)) { + struct intel_remapped_plane_info plane_info; unsigned int tile_width, tile_height; - unsigned int pitch_tiles; - struct drm_rect r; intel_tile_dims(fb, i, &tile_width, &tile_height); - rot_info->plane[i].offset = offset; - rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); - rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); - rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); - - intel_fb->rotated[i].pitch = - rot_info->plane[i].height * tile_height; + plane_info.offset = offset; + plane_info.stride = DIV_ROUND_UP(fb->pitches[i], + tile_width * cpp); + plane_info.width = DIV_ROUND_UP(x + width, tile_width); + plane_info.height = DIV_ROUND_UP(y + height, + tile_height); /* how many tiles does this plane need */ - size = rot_info->plane[i].stride * rot_info->plane[i].height; + size = plane_info.stride * plane_info.height; /* * If the plane isn't horizontally tile aligned, * we need one more tile. 
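The plane-size computation here, taken in isolation: the stride in tiles times the height in tiles, plus one extra tile when a nonzero x offset makes the rows straddle a tile boundary (the size++ just below). A standalone version:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned tiles_needed(unsigned pitch_bytes, unsigned cpp,
                             unsigned x, unsigned y,
                             unsigned w, unsigned h,
                             unsigned tile_w, unsigned tile_h)
{
    unsigned stride_tiles = DIV_ROUND_UP(pitch_bytes, tile_w * cpp);
    unsigned height_tiles = DIV_ROUND_UP(y + h, tile_h);
    unsigned size = stride_tiles * height_tiles;

    if (x != 0) /* not horizontally tile aligned: one more tile */
        size++;
    return size;
}

int main(void)
{
    /* 1920x1080 XRGB8888 on 32x32-pixel Y-tiles, unaligned x */
    printf("%u tiles\n",
           tiles_needed(1920 * 4, 4, 3, 0, 1920, 1080, 32, 32));
    return 0;
}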
@@ -2746,36 +3027,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, if (x != 0) size++; - /* rotate the x/y offsets to match the GTT view */ - drm_rect_init(&r, x, y, width, height); - drm_rect_rotate(&r, - rot_info->plane[i].width * tile_width, - rot_info->plane[i].height * tile_height, - DRM_MODE_ROTATE_270); - x = r.x1; - y = r.y1; - - /* rotate the tile dimensions to match the GTT view */ - pitch_tiles = intel_fb->rotated[i].pitch / tile_height; - swap(tile_width, tile_height); - - /* - * We only keep the x/y offsets, so push all of the - * gtt offset into the x/y offsets. - */ - intel_adjust_tile_offset(&x, &y, - tile_width, tile_height, - tile_size, pitch_tiles, - gtt_offset_rotated * tile_size, 0); - - gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; - - /* - * First pixel of the framebuffer from - * the start of the rotated gtt mapping. - */ - intel_fb->rotated[i].x = x; - intel_fb->rotated[i].y = y; + gtt_offset_rotated += + setup_fb_rotation(i, &plane_info, + gtt_offset_rotated, + x, y, width, height, + tile_size, + tile_width, tile_height, + fb); } else { size = DIV_ROUND_UP((y + height) * fb->pitches[i] + x * cpp, tile_size); @@ -2798,11 +3056,11 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - struct drm_framebuffer *fb = plane_state->base.fb; + to_i915(plane_state->uapi.plane->dev); + struct drm_framebuffer *fb = plane_state->hw.fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_rotation_info *info = &plane_state->view.rotated; - unsigned int rotation = plane_state->base.rotation; + unsigned int rotation = plane_state->hw.rotation; int i, num_planes = fb->format->num_planes; unsigned int tile_size = intel_tile_size(dev_priv); unsigned int src_x, src_y; @@ -2813,20 +3071,20 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state) plane_state->view.type = drm_rotation_90_or_270(rotation) ? 
I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED; - src_x = plane_state->base.src.x1 >> 16; - src_y = plane_state->base.src.y1 >> 16; - src_w = drm_rect_width(&plane_state->base.src) >> 16; - src_h = drm_rect_height(&plane_state->base.src) >> 16; + src_x = plane_state->uapi.src.x1 >> 16; + src_y = plane_state->uapi.src.y1 >> 16; + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_h = drm_rect_height(&plane_state->uapi.src) >> 16; WARN_ON(is_ccs_modifier(fb->modifier)); /* Make src coordinates relative to the viewport */ - drm_rect_translate(&plane_state->base.src, + drm_rect_translate(&plane_state->uapi.src, -(src_x << 16), -(src_y << 16)); /* Rotate src coordinates to match rotated GTT view */ if (drm_rotation_90_or_270(rotation)) - drm_rect_rotate(&plane_state->base.src, + drm_rect_rotate(&plane_state->uapi.src, src_w << 16, src_h << 16, DRM_MODE_ROTATE_270); @@ -2859,6 +3117,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state) DRM_MODE_ROTATE_0, tile_size); offset /= tile_size; + WARN_ON(i >= ARRAY_SIZE(info->plane)); info->plane[i].offset = offset; info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); @@ -2908,8 +3167,8 @@ static int intel_plane_compute_gtt(struct intel_plane_state *plane_state) { const struct intel_framebuffer *fb = - to_intel_framebuffer(plane_state->base.fb); - unsigned int rotation = plane_state->base.rotation; + to_intel_framebuffer(plane_state->hw.fb); + unsigned int rotation = plane_state->hw.rotation; int i, num_planes; if (!fb) @@ -2946,7 +3205,7 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state) /* Rotate src coordinates to match rotated GTT view */ if (drm_rotation_90_or_270(rotation)) - drm_rect_rotate(&plane_state->base.src, + drm_rect_rotate(&plane_state->uapi.src, fb->base.width << 16, fb->base.height << 16, DRM_MODE_ROTATE_270); @@ -2958,6 +3217,8 @@ static int i9xx_format_to_fourcc(int format) switch (format) { case DISPPLANE_8BPP: return DRM_FORMAT_C8; + case DISPPLANE_BGRA555: + return DRM_FORMAT_ARGB1555; case DISPPLANE_BGRX555: return DRM_FORMAT_XRGB1555; case DISPPLANE_BGRX565: @@ -2967,10 +3228,20 @@ static int i9xx_format_to_fourcc(int format) return DRM_FORMAT_XRGB8888; case DISPPLANE_RGBX888: return DRM_FORMAT_XBGR8888; + case DISPPLANE_BGRA888: + return DRM_FORMAT_ARGB8888; + case DISPPLANE_RGBA888: + return DRM_FORMAT_ABGR8888; case DISPPLANE_BGRX101010: return DRM_FORMAT_XRGB2101010; case DISPPLANE_RGBX101010: return DRM_FORMAT_XBGR2101010; + case DISPPLANE_BGRA101010: + return DRM_FORMAT_ARGB2101010; + case DISPPLANE_RGBA101010: + return DRM_FORMAT_ABGR2101010; + case DISPPLANE_RGBX161616: + return DRM_FORMAT_XBGR16161616F; } } @@ -3013,10 +3284,17 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) return DRM_FORMAT_XRGB8888; } case PLANE_CTL_FORMAT_XRGB_2101010: - if (rgb_order) - return DRM_FORMAT_XBGR2101010; - else - return DRM_FORMAT_XRGB2101010; + if (rgb_order) { + if (alpha) + return DRM_FORMAT_ABGR2101010; + else + return DRM_FORMAT_XBGR2101010; + } else { + if (alpha) + return DRM_FORMAT_ARGB2101010; + else + return DRM_FORMAT_XRGB2101010; + } case PLANE_CTL_FORMAT_XRGB_16161616F: if (rgb_order) { if (alpha) @@ -3112,19 +3390,19 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state, bool visible) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - plane_state->base.visible = visible; + plane_state->uapi.visible = 
visible; if (visible) - crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); + crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); else - crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); + crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); } static void fixup_active_planes(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct drm_plane *plane; /* @@ -3135,13 +3413,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state) crtc_state->active_planes = 0; drm_for_each_plane_mask(plane, &dev_priv->drm, - crtc_state->base.plane_mask) + crtc_state->uapi.plane_mask) crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); } static void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane *plane) { + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -3154,9 +3433,30 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_set_plane_visible(crtc_state, plane_state, false); fixup_active_planes(crtc_state); crtc_state->data_rate[plane->id] = 0; + crtc_state->min_cdclk[plane->id] = 0; if (plane->id == PLANE_PRIMARY) - intel_pre_disable_primary_noatomic(&crtc->base); + hsw_disable_ips(crtc_state); + + /* + * Vblank time updates from the shadow to live plane control register + * are blocked if the memory self-refresh mode is active at that + * moment. So to make sure the plane gets truly disabled, disable + * first the self-refresh mode. The self-refresh enable bit in turn + * will be checked/applied by the HW only at the next frame start + * event which is after the vblank start event, so we need to have a + * wait-for-vblank between disabling the plane and the pipe. + */ + if (HAS_GMCH(dev_priv) && + intel_set_memory_cxsr(dev_priv, false)) + intel_wait_for_vblank(dev_priv, crtc->pipe); + + /* + * Gen2 reports pipe underruns whenever all planes are disabled. + * So disable underrun reporting before all the planes get disabled. 
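The ordering constraints spelled out in these comments read as a fixed shutdown sequence: kill self-refresh and wait a vblank so the plane disable actually latches, squelch gen2's spurious underrun reports, then disable the plane. A stubbed sketch of just that control flow; the helpers here are placeholders, not the driver's real entry points:

#include <stdbool.h>
#include <stdio.h>

static bool set_memory_self_refresh(bool on) { printf("cxsr %d\n", on); return true; }
static void wait_for_vblank(void)            { printf("wait vblank\n"); }
static void set_underrun_reporting(bool on)  { printf("underrun %d\n", on); }
static void disable_plane(void)              { printf("plane off\n"); }

static void plane_disable_noatomic(bool has_gmch, bool is_gen2,
                                   unsigned active_planes)
{
    /* Self-refresh blocks shadow->live plane updates at vblank,
     * so it must go first, followed by a vblank wait. */
    if (has_gmch && set_memory_self_refresh(false))
        wait_for_vblank();

    /* Gen2 flags underruns once all planes are off. */
    if (is_gen2 && !active_planes)
        set_underrun_reporting(false);

    disable_plane();
}

int main(void) { plane_disable_noatomic(true, true, 0); return 0; }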
+ */ + if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes) + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); intel_disable_plane(plane, crtc_state); } @@ -3209,7 +3509,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, continue; if (intel_plane_ggtt_offset(state) == plane_config->base) { - fb = state->base.fb; + fb = state->hw.fb; drm_framebuffer_get(fb); goto valid_fb; } @@ -3227,11 +3527,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, return; valid_fb: - intel_state->base.rotation = plane_config->rotation; + intel_state->hw.rotation = plane_config->rotation; intel_fill_fb_ggtt_view(&intel_state->view, fb, - intel_state->base.rotation); + intel_state->hw.rotation); intel_state->color_plane[0].stride = - intel_fb_pitch(fb, 0, intel_state->base.rotation); + intel_fb_pitch(fb, 0, intel_state->hw.rotation); intel_state->vma = intel_pin_and_fence_fb_obj(fb, @@ -3259,14 +3559,15 @@ valid_fb: plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; - intel_state->base.src = drm_plane_state_src(plane_state); - intel_state->base.dst = drm_plane_state_dest(plane_state); + intel_state->uapi.src = drm_plane_state_src(plane_state); + intel_state->uapi.dst = drm_plane_state_dest(plane_state); if (plane_config->tiling) dev_priv->preserve_bios_swizzle = true; plane_state->fb = fb; plane_state->crtc = &intel_crtc->base; + intel_plane_copy_uapi_to_hw_state(intel_state, intel_state); atomic_or(to_intel_plane(primary)->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); @@ -3358,14 +3659,16 @@ static int icl_max_plane_height(void) static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, int main_x, int main_y, u32 main_offset) { - const struct drm_framebuffer *fb = plane_state->base.fb; - int hsub = fb->format->hsub; - int vsub = fb->format->vsub; - int aux_x = plane_state->color_plane[1].x; - int aux_y = plane_state->color_plane[1].y; - u32 aux_offset = plane_state->color_plane[1].offset; - u32 alignment = intel_surf_alignment(fb, 1); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int ccs_plane = main_to_ccs_plane(fb, 0); + int aux_x = plane_state->color_plane[ccs_plane].x; + int aux_y = plane_state->color_plane[ccs_plane].y; + u32 aux_offset = plane_state->color_plane[ccs_plane].offset; + u32 alignment = intel_surf_alignment(fb, ccs_plane); + int hsub; + int vsub; + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); while (aux_offset >= main_offset && aux_y <= main_y) { int x, y; @@ -3377,8 +3680,12 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state x = aux_x / hsub; y = aux_y / vsub; - aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1, - aux_offset, aux_offset - alignment); + aux_offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + ccs_plane, + aux_offset, + aux_offset - + alignment); aux_x = x * hsub + aux_x % hsub; aux_y = y * vsub + aux_y % vsub; } @@ -3386,25 +3693,28 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state if (aux_x != main_x || aux_y != main_y) return false; - plane_state->color_plane[1].offset = aux_offset; - plane_state->color_plane[1].x = aux_x; - plane_state->color_plane[1].y = aux_y; + plane_state->color_plane[ccs_plane].offset = aux_offset; + plane_state->color_plane[ccs_plane].x = aux_x; + plane_state->color_plane[ccs_plane].y = aux_y; return true; } static int skl_check_main_surface(struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = 
to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; - int x = plane_state->base.src.x1 >> 16; - int y = plane_state->base.src.y1 >> 16; - int w = drm_rect_width(&plane_state->base.src) >> 16; - int h = drm_rect_height(&plane_state->base.src) >> 16; + struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + int x = plane_state->uapi.src.x1 >> 16; + int y = plane_state->uapi.src.y1 >> 16; + int w = drm_rect_width(&plane_state->uapi.src) >> 16; + int h = drm_rect_height(&plane_state->uapi.src) >> 16; int max_width; int max_height; - u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset; + u32 alignment; + u32 offset; + int aux_plane = intel_main_to_aux_plane(fb, 0); + u32 aux_offset = plane_state->color_plane[aux_plane].offset; if (INTEL_GEN(dev_priv) >= 11) max_width = icl_max_plane_width(fb, 0, rotation); @@ -3470,7 +3780,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) offset, offset - alignment); } - if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) { + if (x != plane_state->color_plane[aux_plane].x || + y != plane_state->color_plane[aux_plane].y) { DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); return -EINVAL; } @@ -3484,7 +3795,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * Put the final coordinates back so that the src * coordinate checks will see the right values. */ - drm_rect_translate_to(&plane_state->base.src, + drm_rect_translate_to(&plane_state->uapi.src, x << 16, y << 16); return 0; @@ -3492,14 +3803,14 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; int max_width = skl_max_plane_width(fb, 1, rotation); int max_height = 4096; - int x = plane_state->base.src.x1 >> 17; - int y = plane_state->base.src.y1 >> 17; - int w = drm_rect_width(&plane_state->base.src) >> 17; - int h = drm_rect_height(&plane_state->base.src) >> 17; + int x = plane_state->uapi.src.x1 >> 17; + int y = plane_state->uapi.src.y1 >> 17; + int w = drm_rect_width(&plane_state->uapi.src) >> 17; + int h = drm_rect_height(&plane_state->uapi.src) >> 17; u32 offset; intel_add_fb_offsets(&x, &y, plane_state, 1); @@ -3521,15 +3832,18 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; - int src_x = plane_state->base.src.x1 >> 16; - int src_y = plane_state->base.src.y1 >> 16; - int hsub = fb->format->hsub; - int vsub = fb->format->vsub; - int x = src_x / hsub; - int y = src_y / vsub; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int src_x = plane_state->uapi.src.x1 >> 16; + int src_y = plane_state->uapi.src.y1 >> 16; + int hsub; + int vsub; + int x; + int y; u32 offset; + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1); + x = src_x / hsub; + y = src_y / vsub; intel_add_fb_offsets(&x, &y, plane_state, 1); offset = intel_plane_compute_aligned_offset(&x, &y, 
plane_state, 1); @@ -3542,21 +3856,22 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) int skl_check_plane_surface(struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; ret = intel_plane_compute_gtt(plane_state); if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; /* * Handle the AUX surface first since * the main surface setup depends on it. */ - if (drm_format_info_is_yuv_semiplanar(fb->format)) { + if (intel_format_info_is_yuv_semiplanar(fb->format, + fb->modifier)) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -3577,6 +3892,53 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; } +static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + /* + * g4x bspec says 64bpp pixel rate can't exceed 80% + * of cdclk when the sprite plane is enabled on the + * same pipe. ilk/snb bspec says 64bpp pixel rate is + * never allowed to exceed 80% of cdclk. Let's just go + * with the ilk/snb limit always. + */ + if (cpp == 8) { + *num = 10; + *den = 8; + } else { + *num = 1; + *den = 1; + } +} + +static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. 
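Numerically, the 64bpp guard in i9xx_plane_ratio() means the plane needs cdclk >= pixel_rate * 10/8, i.e. the pixel rate may not exceed 80% of cdclk, with the denominator doubled for a double wide pipe. A standalone check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned min_cdclk_khz(unsigned pixel_rate, unsigned cpp,
                              int double_wide)
{
    /* 64bpp pixel rate must stay under 80% of cdclk */
    unsigned num = (cpp == 8) ? 10 : 1;
    unsigned den = (cpp == 8) ? 8 : 1;

    if (double_wide) /* two pixels per clock */
        den *= 2;
    return DIV_ROUND_UP(pixel_rate * num, den);
}

int main(void)
{
    printf("64bpp @ 148500 kHz -> min cdclk %u kHz\n",
           min_cdclk_khz(148500, 8, 0)); /* 185625 */
    return 0;
}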
+ */ + pixel_rate = crtc_state->pixel_rate; + + i9xx_plane_ratio(crtc_state, plane_state, &num, &den); + + /* two pixels per clock with double wide pipe */ + if (crtc_state->double_wide) + den *= 2; + + return DIV_ROUND_UP(pixel_rate * num, den); +} + unsigned int i9xx_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -3606,7 +3968,7 @@ i9xx_plane_max_stride(struct intel_plane *plane, static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dspcntr = 0; @@ -3626,9 +3988,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; u32 dspcntr; dspcntr = DISPLAY_PLANE_ENABLE; @@ -3644,6 +4006,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XRGB1555: dspcntr |= DISPPLANE_BGRX555; break; + case DRM_FORMAT_ARGB1555: + dspcntr |= DISPPLANE_BGRA555; + break; case DRM_FORMAT_RGB565: dspcntr |= DISPPLANE_BGRX565; break; @@ -3653,12 +4018,27 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XBGR8888: dspcntr |= DISPPLANE_RGBX888; break; + case DRM_FORMAT_ARGB8888: + dspcntr |= DISPPLANE_BGRA888; + break; + case DRM_FORMAT_ABGR8888: + dspcntr |= DISPPLANE_RGBA888; + break; case DRM_FORMAT_XRGB2101010: dspcntr |= DISPPLANE_BGRX101010; break; case DRM_FORMAT_XBGR2101010: dspcntr |= DISPPLANE_RGBX101010; break; + case DRM_FORMAT_ARGB2101010: + dspcntr |= DISPPLANE_BGRA101010; + break; + case DRM_FORMAT_ABGR2101010: + dspcntr |= DISPPLANE_RGBA101010; + break; + case DRM_FORMAT_XBGR16161616F: + dspcntr |= DISPPLANE_RGBX161616; + break; default: MISSING_CASE(fb->format->format); return 0; @@ -3680,8 +4060,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, int i9xx_check_plane_surface(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - int src_x, src_y; + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int src_x, src_y, src_w; u32 offset; int ret; @@ -3689,11 +4070,16 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; - src_x = plane_state->base.src.x1 >> 16; - src_y = plane_state->base.src.y1 >> 16; + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_x = plane_state->uapi.src.x1 >> 16; + src_y = plane_state->uapi.src.y1 >> 16; + + /* Undocumented hardware limit on i965/g4x/vlv/chv */ + if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) + return -EINVAL; intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); @@ -3707,14 +4093,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) * Put the final coordinates back so that the src * coordinate checks will see the right values. 
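The pre-HSW 180-degree handling that follows simply re-anchors the scanout start at the opposite corner: the hardware walks the buffer backwards, so the programmed x/y must point at the last pixel of the source rectangle. In isolation:

#include <stdio.h>

/* Pre-HSW: for 180 degree rotation, point at the last pixel. */
static void rotate_180_fixup(int *x, int *y, int w, int h)
{
    *x += w - 1;
    *y += h - 1;
}

int main(void)
{
    int x = 0, y = 0;

    rotate_180_fixup(&x, &y, 1920, 1080);
    printf("start pixel: %d,%d\n", x, y); /* 1919,1079 */
    return 0;
}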
*/ - drm_rect_translate_to(&plane_state->base.src, + drm_rect_translate_to(&plane_state->uapi.src, src_x << 16, src_y << 16); /* HSW/BDW do this automagically in hardware */ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { - unsigned int rotation = plane_state->base.rotation; - int src_w = drm_rect_width(&plane_state->base.src) >> 16; - int src_h = drm_rect_height(&plane_state->base.src) >> 16; + unsigned int rotation = plane_state->hw.rotation; + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; if (rotation & DRM_MODE_ROTATE_180) { src_x += src_w - 1; @@ -3751,15 +4137,15 @@ static int i9xx_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); int ret; ret = chv_plane_check_rotation(plane_state); if (ret) return ret; - ret = drm_atomic_helper_check_plane_state(&plane_state->base, - &crtc_state->base, + ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, + &crtc_state->uapi, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, i9xx_plane_has_windowing(plane), @@ -3771,7 +4157,7 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); @@ -3792,10 +4178,10 @@ static void i9xx_update_plane(struct intel_plane *plane, u32 linear_offset; int x = plane_state->color_plane[0].x; int y = plane_state->color_plane[0].y; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - int crtc_w = drm_rect_width(&plane_state->base.dst); - int crtc_h = drm_rect_height(&plane_state->base.dst); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); unsigned long irqflags; u32 dspaddr_offset; u32 dspcntr; @@ -3935,7 +4321,7 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) */ static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; int i; @@ -3954,7 +4340,7 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, * The stride is either expressed as a multiple of 64 bytes chunks for * linear buffers or in number of tiles for tiled buffers. 
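So the value written to the plane stride register is the byte stride divided by a layout-dependent unit: 64-byte chunks for linear, the tile height for 90/270 rotation (tiles are walked column-wise), otherwise the tile width in bytes. A standalone sketch; the 128x32 Y-tile geometry is assumed, and the rotated case is simplified (the driver divides a stride already kept in rotated units, not the byte pitch used here):

#include <stdio.h>

enum layout { LINEAR, TILED, TILED_ROT90 };

static unsigned stride_reg_unit(enum layout l,
                                unsigned tile_w_bytes, unsigned tile_rows)
{
    switch (l) {
    case LINEAR:      return 64;           /* 64-byte chunks */
    case TILED_ROT90: return tile_rows;    /* tile height    */
    default:          return tile_w_bytes; /* tile width     */
    }
}

int main(void)
{
    unsigned pitch = 1920 * 4; /* assumed XRGB8888 pitch */

    printf("linear:  %u\n", pitch / stride_reg_unit(LINEAR, 128, 32));
    printf("Y-tiled: %u\n", pitch / stride_reg_unit(TILED, 128, 32));
    return 0;
}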
*/ - if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + if (is_surface_linear(fb, color_plane)) return 64; else if (drm_rotation_90_or_270(rotation)) return intel_tile_height(fb, color_plane); @@ -3965,8 +4351,8 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, u32 skl_plane_stride(const struct intel_plane_state *plane_state, int color_plane) { - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; u32 stride = plane_state->color_plane[color_plane].stride; if (color_plane >= fb->format->num_planes) @@ -3989,8 +4375,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_ARGB8888: return PLANE_CTL_FORMAT_XRGB_8888; case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: return PLANE_CTL_FORMAT_XRGB_2101010; case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: @@ -4035,10 +4423,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format) static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) { - if (!plane_state->base.fb->format->has_alpha) + if (!plane_state->hw.fb->format->has_alpha) return PLANE_CTL_ALPHA_DISABLE; - switch (plane_state->base.pixel_blend_mode) { + switch (plane_state->hw.pixel_blend_mode) { case DRM_MODE_BLEND_PIXEL_NONE: return PLANE_CTL_ALPHA_DISABLE; case DRM_MODE_BLEND_PREMULTI: @@ -4046,17 +4434,17 @@ static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) case DRM_MODE_BLEND_COVERAGE: return PLANE_CTL_ALPHA_HW_PREMULTIPLY; default: - MISSING_CASE(plane_state->base.pixel_blend_mode); + MISSING_CASE(plane_state->hw.pixel_blend_mode); return PLANE_CTL_ALPHA_DISABLE; } } static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) { - if (!plane_state->base.fb->format->has_alpha) + if (!plane_state->hw.fb->format->has_alpha) return PLANE_COLOR_ALPHA_DISABLE; - switch (plane_state->base.pixel_blend_mode) { + switch (plane_state->hw.pixel_blend_mode) { case DRM_MODE_BLEND_PIXEL_NONE: return PLANE_COLOR_ALPHA_DISABLE; case DRM_MODE_BLEND_PREMULTI: @@ -4064,7 +4452,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state case DRM_MODE_BLEND_COVERAGE: return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; default: - MISSING_CASE(plane_state->base.pixel_blend_mode); + MISSING_CASE(plane_state->hw.pixel_blend_mode); return PLANE_COLOR_ALPHA_DISABLE; } } @@ -4080,6 +4468,10 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier) return PLANE_CTL_TILED_Y; case I915_FORMAT_MOD_Y_TILED_CCS: return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + return PLANE_CTL_TILED_Y | + PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | + PLANE_CTL_CLEAR_COLOR_DISABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: @@ -4130,7 +4522,7 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect) u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 plane_ctl = 0; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -4149,9 +4541,9 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { 
struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 plane_ctl; @@ -4161,10 +4553,10 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, plane_ctl |= skl_plane_ctl_alpha(plane_state); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; - if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) + if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; - if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; } @@ -4186,7 +4578,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 plane_color_ctl = 0; if (INTEL_GEN(dev_priv) >= 11) @@ -4205,21 +4597,21 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); u32 plane_color_ctl = 0; plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { - if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) + if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; else plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; - if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } else if (fb->format->is_yuv) { plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; @@ -4407,7 +4799,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 trans_ddi_func_ctl2_val; u8 master_select; @@ -4435,25 +4827,6 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state trans_ddi_func_ctl2_val); } -static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 trans_ddi_func_ctl2_val; - - if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) - return; - - DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", - 
transcoder_name(old_crtc_state->cpu_transcoder)); - - reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); - trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | - PORT_SYNC_MODE_MASTER_SELECT_MASK); - I915_WRITE(reg, trans_ddi_func_ctl2_val); -} - static void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -4506,7 +4879,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc, u32 temp, tries; /* FDI needs bits from pipe first */ - assert_pipe_enabled(dev_priv, pipe); + assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder); /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -4851,7 +5224,7 @@ train_done: static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; @@ -4916,12 +5289,10 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) udelay(100); } -static void ironlake_fdi_disable(struct drm_crtc *crtc) +static void ironlake_fdi_disable(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; @@ -5012,9 +5383,9 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv) /* Program iCLKIP clock to the desired frequency */ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int clock = crtc_state->base.adjusted_mode.crtc_clock; + int clock = crtc_state->hw.adjusted_mode.crtc_clock; u32 divsel, phaseinc, auxdiv, phasedir = 0; u32 temp; @@ -5128,7 +5499,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; @@ -5171,7 +5542,7 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); switch (crtc->pipe) { @@ -5201,7 +5572,7 @@ static struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_connector_state *connector_state; const struct drm_connector *connector; struct intel_encoder *encoder = NULL; @@ -5233,7 +5604,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state, static void 
ironlake_pch_enable(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; @@ -5287,7 +5658,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, if (HAS_PCH_CPT(dev_priv) && intel_crtc_has_dp_encoder(crtc_state)) { const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; i915_reg_t reg = TRANS_DP_CTL(pipe); enum port port; @@ -5317,7 +5688,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, static void lpt_pch_enable(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; @@ -5331,9 +5702,9 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe) +static void cpt_verify_modeset(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t dslreg = PIPEDSL(pipe); u32 temp; @@ -5429,15 +5800,16 @@ static int skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, unsigned int scaler_user, int *scaler_id, int src_w, int src_h, int dst_w, int dst_h, - const struct drm_format_info *format, bool need_scaler) + const struct drm_format_info *format, + u64 modifier, bool need_scaler) { struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; struct intel_crtc *intel_crtc = - to_intel_crtc(crtc_state->base.crtc); + to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; /* * Src coordinates are already rotated by 270 degrees for @@ -5453,7 +5825,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, * Once NV12 is enabled, handle it here while allocating scaler * for NV12. 
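
[editor's note] The skl_update_scaler() hunks above thread a new modifier argument through the scaler checks; the surrounding comments describe when a scaler is needed at all. A minimal sketch of that decision, with illustrative names only (the driver's real limits live in the SKL_MIN_*/SKL_MAX_* constants and the NV12 source minimums):

	/*
	 * Sketch only: a pipe/plane scaler is required whenever the source
	 * and destination sizes differ; format-specific minimums (e.g. the
	 * planar YUV 16x16 source floor checked above) come on top of this.
	 * needs_scaler() is a hypothetical helper, not a driver function.
	 */
	static bool needs_scaler(int src_w, int src_h, int dst_w, int dst_h)
	{
		return src_w != dst_w || src_h != dst_h;
	}
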
*/ - if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && + if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); return -EINVAL; @@ -5483,7 +5855,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } - if (format && drm_format_info_is_yuv_semiplanar(format) && + if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); return -EINVAL; @@ -5525,17 +5897,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, */ int skl_update_scaler_crtc(struct intel_crtc_state *state) { - const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode; bool need_scaler = false; if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) need_scaler = true; - return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, + return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_vdisplay, NULL, need_scaler); + adjusted_mode->crtc_vdisplay, NULL, 0, + need_scaler); } /** @@ -5551,26 +5924,28 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *intel_plane = - to_intel_plane(plane_state->base.plane); + to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); - struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_framebuffer *fb = plane_state->hw.fb; int ret; - bool force_detach = !fb || !plane_state->base.visible; + bool force_detach = !fb || !plane_state->uapi.visible; bool need_scaler = false; /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && - fb && drm_format_info_is_yuv_semiplanar(fb->format)) + fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), &plane_state->scaler_id, - drm_rect_width(&plane_state->base.src) >> 16, - drm_rect_height(&plane_state->base.src) >> 16, - drm_rect_width(&plane_state->base.dst), - drm_rect_height(&plane_state->base.dst), - fb ? fb->format : NULL, need_scaler); + drm_rect_width(&plane_state->uapi.src) >> 16, + drm_rect_height(&plane_state->uapi.src) >> 16, + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst), + fb ? fb->format : NULL, + fb ? 
fb->modifier : 0, + need_scaler); if (ret || plane_state->scaler_id < 0) return ret; @@ -5592,10 +5967,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -5611,6 +5984,13 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: break; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + if (INTEL_GEN(dev_priv) >= 11) + break; + /* fall through */ default: DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", intel_plane->base.base.id, intel_plane->base.name, @@ -5621,8 +6001,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, return 0; } -static void skylake_scaler_disable(struct intel_crtc *crtc) +void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); int i; for (i = 0; i < crtc->num_scalers; i++) @@ -5631,7 +6012,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc) static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; const struct intel_crtc_scaler_state *scaler_state = @@ -5668,7 +6049,7 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -5689,7 +6070,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) void hsw_enable_ips(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5725,7 +6106,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state) void hsw_disable_ips(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5760,77 +6141,10 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) */ } -/** - * intel_post_enable_primary - Perform operations after enabling primary plane - * @crtc: the CRTC whose primary plane was just enabled - * @new_crtc_state: the enabling state - * - * Performs potentially sleeping operations that must be done after the primary - * plane is enabled, such as updating FBC and IPS. 
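
[editor's note] The format-switch change above moves the FP16 formats out of the unconditionally supported list and gates them on gen11+. The shape of that change is a gen-gated fall-through: accept on new hardware, drop into the unsupported path on older parts. A self-contained illustration under assumed names (skl_format_scalable() is hypothetical):

	#include <linux/errno.h>
	#include <drm/drm_fourcc.h>

	/* Illustration of the gen-gated fall-through used for the FP16
	 * scaler formats above; not the driver's actual helper. */
	static int skl_format_scalable(u32 format, int gen)
	{
		switch (format) {
		case DRM_FORMAT_XBGR16161616F:
		case DRM_FORMAT_ABGR16161616F:
			if (gen >= 11)
				return 0;	/* scalable on gen11+ */
			/* fall through */
		default:
			return -EINVAL;		/* not scalable here */
		}
	}
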
Note that this may be - * called due to an explicit primary plane update, or due to an implicit - * re-enable that is caused when a sprite plane is updated to no longer - * completely hide the primary plane. - */ -static void -intel_post_enable_primary(struct drm_crtc *crtc, - const struct intel_crtc_state *new_crtc_state) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; - - /* - * Gen2 reports pipe underruns whenever all planes are disabled. - * So don't enable underrun reporting before at least some planes - * are enabled. - * FIXME: Need to fix the logic to work when we turn off all planes - * but leave the pipe running. - */ - if (IS_GEN(dev_priv, 2)) - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - - /* Underruns don't always raise interrupts, so check manually. */ - intel_check_cpu_fifo_underruns(dev_priv); - intel_check_pch_fifo_underruns(dev_priv); -} - -/* FIXME get rid of this and use pre_plane_update */ -static void -intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; - - /* - * Gen2 reports pipe underruns whenever all planes are disabled. - * So disable underrun reporting before all the planes get disabled. - */ - if (IS_GEN(dev_priv, 2)) - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - - hsw_disable_ips(to_intel_crtc_state(crtc->state)); - - /* - * Vblank time updates from the shadow to live plane control register - * are blocked if the memory self-refresh mode is active at that - * moment. So to make sure the plane gets truly disabled, disable - * first the self-refresh mode. The self-refresh enable bit in turn - * will be checked/applied by the HW only at the next frame start - * event which is after the vblank start event, so we need to have a - * wait-for-vblank between disabling the plane and the pipe. - */ - if (HAS_GMCH(dev_priv) && - intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, pipe); -} - static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!old_crtc_state->ips_enabled) @@ -5846,7 +6160,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s * Disable IPS before we program the LUT. 
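
[editor's note] The constraint spelled out in the comment below — on Haswell with split gamma, IPS must be off while the LUTs are reprogrammed — surfaces as a disable/enable pair bracketing the color commit in the pre/post plane-update hooks. Roughly, in call order only (function names are the ones visible in these hunks; this is a sketch, not verbatim driver code):

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	intel_color_load_luts(new_crtc_state);	/* degamma/gamma tables */
	intel_color_commit(new_crtc_state);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);
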
*/ if (IS_HASWELL(dev_priv) && - (new_crtc_state->base.color_mgmt_changed || + (new_crtc_state->uapi.color_mgmt_changed || new_crtc_state->update_pipe) && new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) return true; @@ -5857,7 +6171,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!new_crtc_state->ips_enabled) @@ -5873,7 +6187,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s * Re-enable IPS after the LUT has been programmed. */ if (IS_HASWELL(dev_priv) && - (new_crtc_state->base.color_mgmt_changed || + (new_crtc_state->uapi.color_mgmt_changed || new_crtc_state->update_pipe) && new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) return true; @@ -5883,15 +6197,16 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s * forcibly enable IPS on the first fastset. */ if (new_crtc_state->update_pipe && - old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) + old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) return true; return !old_crtc_state->ips_enabled; } -static bool needs_nv12_wa(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state) +static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + if (!crtc_state->nv12_planes) return false; @@ -5902,9 +6217,10 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv, return false; } -static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state) +static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + /* Wa_2006604312:icl */ if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv)) return true; @@ -5912,89 +6228,82 @@ static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv, return false; } -static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) +static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *state = old_crtc_state->base.state; - struct intel_crtc_state *pipe_config = - intel_atomic_get_new_crtc_state(to_intel_atomic_state(state), - crtc); - struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(state, primary); + return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && + new_crtc_state->active_planes; +} - intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); +static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + return old_crtc_state->active_planes && + (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); +} - if (pipe_config->update_wm_post && pipe_config->base.active) - 
intel_update_watermarks(crtc); +static void intel_post_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_plane *primary = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *new_primary_state = + intel_atomic_get_new_plane_state(state, primary); + enum pipe pipe = crtc->pipe; + + intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); - if (hsw_post_update_enable_ips(old_crtc_state, pipe_config)) - hsw_enable_ips(pipe_config); + if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) + intel_update_watermarks(crtc); - if (old_primary_state) { - struct drm_plane_state *new_primary_state = - drm_atomic_get_new_plane_state(state, primary); + if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) + hsw_enable_ips(new_crtc_state); + if (new_primary_state) intel_fbc_post_update(crtc); - if (new_primary_state->visible && - (needs_modeset(pipe_config) || - !old_primary_state->visible)) - intel_post_enable_primary(&crtc->base, pipe_config); - } - - if (needs_nv12_wa(dev_priv, old_crtc_state) && - !needs_nv12_wa(dev_priv, pipe_config)) - skl_wa_827(dev_priv, crtc->pipe, false); + if (needs_nv12_wa(old_crtc_state) && + !needs_nv12_wa(new_crtc_state)) + skl_wa_827(dev_priv, pipe, false); - if (needs_scalerclk_wa(dev_priv, old_crtc_state) && - !needs_scalerclk_wa(dev_priv, pipe_config)) - icl_wa_scalerclkgating(dev_priv, crtc->pipe, false); + if (needs_scalerclk_wa(old_crtc_state) && + !needs_scalerclk_wa(new_crtc_state)) + icl_wa_scalerclkgating(dev_priv, pipe, false); } -static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *pipe_config) +static void intel_pre_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *state = old_crtc_state->base.state; - struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(state, primary); - bool modeset = needs_modeset(pipe_config); - struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_plane *primary = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *new_primary_state = + intel_atomic_get_new_plane_state(state, primary); + enum pipe pipe = crtc->pipe; - if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) + if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) hsw_disable_ips(old_crtc_state); - if (old_primary_state) { - struct intel_plane_state *new_primary_state = - intel_atomic_get_new_plane_state(intel_state, - to_intel_plane(primary)); - - intel_fbc_pre_update(crtc, pipe_config, new_primary_state); - /* - * Gen2 reports pipe underruns whenever all planes are disabled. - * So disable underrun reporting before all the planes get disabled. 
- */ - if (IS_GEN(dev_priv, 2) && old_primary_state->visible && - (modeset || !new_primary_state->base.visible)) - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - } + if (new_primary_state && + intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state)) + intel_wait_for_vblank(dev_priv, pipe); /* Display WA 827 */ - if (!needs_nv12_wa(dev_priv, old_crtc_state) && - needs_nv12_wa(dev_priv, pipe_config)) - skl_wa_827(dev_priv, crtc->pipe, true); + if (!needs_nv12_wa(old_crtc_state) && + needs_nv12_wa(new_crtc_state)) + skl_wa_827(dev_priv, pipe, true); /* Wa_2006604312:icl */ - if (!needs_scalerclk_wa(dev_priv, old_crtc_state) && - needs_scalerclk_wa(dev_priv, pipe_config)) - icl_wa_scalerclkgating(dev_priv, crtc->pipe, true); + if (!needs_scalerclk_wa(old_crtc_state) && + needs_scalerclk_wa(new_crtc_state)) + icl_wa_scalerclkgating(dev_priv, pipe, true); /* * Vblank time updates from the shadow to live plane control register @@ -6005,9 +6314,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. */ - if (HAS_GMCH(dev_priv) && old_crtc_state->base.active && - pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, crtc->pipe); + if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && + new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) + intel_wait_for_vblank(dev_priv, pipe); /* * IVB workaround: must disable low power watermarks for at least @@ -6016,36 +6325,45 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * * WaCxSRDisabledForSpriteScaling:ivb */ - if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && - old_crtc_state->base.active) - intel_wait_for_vblank(dev_priv, crtc->pipe); + if (old_crtc_state->hw.active && + new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) + intel_wait_for_vblank(dev_priv, pipe); /* - * If we're doing a modeset, we're done. No need to do any pre-vblank - * watermark programming here. + * If we're doing a modeset we don't need to do any + * pre-vblank watermark programming here. */ - if (needs_modeset(pipe_config)) - return; + if (!needs_modeset(new_crtc_state)) { + /* + * For platforms that support atomic watermarks, program the + * 'intermediate' watermarks immediately. On pre-gen9 platforms, these + * will be the intermediate values that are safe for both pre- and + * post- vblank; when vblank happens, the 'active' values will be set + * to the final 'target' values and we'll do this again to get the + * optimal watermarks. For gen9+ platforms, the values we program here + * will be the final target values which will get automatically latched + * at vblank time; no further programming will be necessary. + * + * If a platform hasn't been transitioned to atomic watermarks yet, + * we'll continue to update watermarks the old way, if flags tell + * us to. + */ + if (dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(state, crtc); + else if (new_crtc_state->update_wm_pre) + intel_update_watermarks(crtc); + } /* - * For platforms that support atomic watermarks, program the - * 'intermediate' watermarks immediately. 
On pre-gen9 platforms, these - * will be the intermediate values that are safe for both pre- and - * post- vblank; when vblank happens, the 'active' values will be set - * to the final 'target' values and we'll do this again to get the - * optimal watermarks. For gen9+ platforms, the values we program here - * will be the final target values which will get automatically latched - * at vblank time; no further programming will be necessary. + * Gen2 reports pipe underruns whenever all planes are disabled. + * So disable underrun reporting before all the planes get disabled. * - * If a platform hasn't been transitioned to atomic watermarks yet, - * we'll continue to update watermarks the old way, if flags tell - * us to. + * We do this after .initial_watermarks() so that we have a + * chance of catching underruns with the intermediate watermarks + * vs. the old plane configuration. */ - if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(intel_state, - pipe_config); - else if (pipe_config->update_wm_pre) - intel_update_watermarks(crtc); + if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); } static void intel_crtc_disable_planes(struct intel_atomic_state *state, @@ -6069,7 +6387,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, intel_disable_plane(plane, new_crtc_state); - if (old_plane_state->base.visible) + if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; } @@ -6168,11 +6486,12 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state) } } -static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_connector_state *conn_state; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; @@ -6188,11 +6507,12 @@ static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, } } -static void intel_encoders_pre_enable(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_pre_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_connector_state *conn_state; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; @@ -6208,11 +6528,12 @@ static void intel_encoders_pre_enable(struct intel_crtc *crtc, } } -static void intel_encoders_enable(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_connector_state *conn_state; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; @@ -6229,11 +6550,12 @@ static void intel_encoders_enable(struct intel_crtc *crtc, } } -static void intel_encoders_disable(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - 
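
[editor's note] The intermediate-vs-optimal scheme described in the comment above amounts to programming, before vblank, values that are safe for both the old and the new plane configuration, then latching the final values afterwards. One common way to build such a safe value is a per-level maximum; a self-contained sketch with illustrative types and names (not the driver's ilk_* hooks):

	#include <linux/kernel.h>	/* max() */

	#define WM_LEVELS 8

	struct wm_values {
		unsigned int level[WM_LEVELS];
	};

	/* Sketch: an "intermediate" watermark safe for both the old and
	 * the new configuration, taken as the more conservative of the
	 * two at every level. */
	static void compute_intermediate_wm(struct wm_values *mid,
					    const struct wm_values *old_wm,
					    const struct wm_values *new_wm)
	{
		int level;

		for (level = 0; level < WM_LEVELS; level++)
			mid->level[level] = max(old_wm->level[level],
						new_wm->level[level]);
	}
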
struct drm_connector_state *old_conn_state; + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; @@ -6250,11 +6572,12 @@ static void intel_encoders_disable(struct intel_crtc *crtc, } } -static void intel_encoders_post_disable(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_connector_state *old_conn_state; + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; @@ -6270,11 +6593,12 @@ static void intel_encoders_post_disable(struct intel_crtc *crtc, } } -static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_connector_state *old_conn_state; + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; @@ -6290,11 +6614,12 @@ static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, } } -static void intel_encoders_update_pipe(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_atomic_state *state) +static void intel_encoders_update_pipe(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_connector_state *conn_state; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_connector_state *conn_state; struct drm_connector *conn; int i; @@ -6312,22 +6637,21 @@ static void intel_encoders_update_pipe(struct intel_crtc *crtc, static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); plane->disable_plane(plane, crtc_state); } -static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, - struct intel_atomic_state *state) +static void ironlake_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; - if (WARN_ON(intel_crtc->active)) + if (WARN_ON(crtc->active)) return; /* @@ -6343,61 +6667,59 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (pipe_config->has_pch_encoder) - intel_prepare_shared_dpll(pipe_config); + if (new_crtc_state->has_pch_encoder) + intel_prepare_shared_dpll(new_crtc_state); - if (intel_crtc_has_dp_encoder(pipe_config)) - intel_dp_set_m_n(pipe_config, M1_N1); + if 
(intel_crtc_has_dp_encoder(new_crtc_state)) + intel_dp_set_m_n(new_crtc_state, M1_N1); - intel_set_pipe_timings(pipe_config); - intel_set_pipe_src_size(pipe_config); + intel_set_pipe_timings(new_crtc_state); + intel_set_pipe_src_size(new_crtc_state); - if (pipe_config->has_pch_encoder) { - intel_cpu_transcoder_set_m_n(pipe_config, - &pipe_config->fdi_m_n, NULL); - } + if (new_crtc_state->has_pch_encoder) + intel_cpu_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->fdi_m_n, NULL); - ironlake_set_pipeconf(pipe_config); + ironlake_set_pipeconf(new_crtc_state); - intel_crtc->active = true; + crtc->active = true; - intel_encoders_pre_enable(intel_crtc, pipe_config, state); + intel_encoders_pre_enable(state, crtc); - if (pipe_config->has_pch_encoder) { + if (new_crtc_state->has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the * cpu pipes, hence this is separate from all the other fdi/pch * enabling. */ - ironlake_fdi_pll_enable(pipe_config); + ironlake_fdi_pll_enable(new_crtc_state); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); } - ironlake_pfit_enable(pipe_config); + ironlake_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(pipe_config); - intel_color_commit(pipe_config); + intel_color_load_luts(new_crtc_state); + intel_color_commit(new_crtc_state); /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(pipe_config); + intel_disable_primary_plane(new_crtc_state); - if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(state, pipe_config); - intel_enable_pipe(pipe_config); + if (dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(state, crtc); + intel_enable_pipe(new_crtc_state); - if (pipe_config->has_pch_encoder) - ironlake_pch_enable(state, pipe_config); + if (new_crtc_state->has_pch_encoder) + ironlake_pch_enable(state, new_crtc_state); - assert_vblank_disabled(crtc); - intel_crtc_vblank_on(pipe_config); + intel_crtc_vblank_on(new_crtc_state); - intel_encoders_enable(intel_crtc, pipe_config, state); + intel_encoders_enable(state, crtc); if (HAS_PCH_CPT(dev_priv)) - cpt_verify_modeset(dev, intel_crtc->pipe); + cpt_verify_modeset(dev_priv, pipe); /* * Must wait for vblank to avoid spurious PCH FIFO underruns. @@ -6405,7 +6727,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, * some interlaced HDMI modes. Let's do the double wait always * in case there are more corner cases we don't know about. 
*/ - if (pipe_config->has_pch_encoder) { + if (new_crtc_state->has_pch_encoder) { intel_wait_for_vblank(dev_priv, pipe); intel_wait_for_vblank(dev_priv, pipe); } @@ -6452,103 +6774,112 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); } -static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, - struct intel_atomic_state *state) +static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe; - enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); + u32 val; + + val = I915_READ(reg); + val &= ~HSW_FRAME_START_DELAY_MASK; + val |= HSW_FRAME_START_DELAY(0); + I915_WRITE(reg, val); +} + +static void haswell_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe, hsw_workaround_pipe; + enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; bool psl_clkgate_wa; - if (WARN_ON(intel_crtc->active)) + if (WARN_ON(crtc->active)) return; - intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); + intel_encoders_pre_pll_enable(state, crtc); - if (pipe_config->shared_dpll) - intel_enable_shared_dpll(pipe_config); + if (new_crtc_state->shared_dpll) + intel_enable_shared_dpll(new_crtc_state); - intel_encoders_pre_enable(intel_crtc, pipe_config, state); + intel_encoders_pre_enable(state, crtc); - if (intel_crtc_has_dp_encoder(pipe_config)) - intel_dp_set_m_n(pipe_config, M1_N1); + if (intel_crtc_has_dp_encoder(new_crtc_state)) + intel_dp_set_m_n(new_crtc_state, M1_N1); if (!transcoder_is_dsi(cpu_transcoder)) - intel_set_pipe_timings(pipe_config); + intel_set_pipe_timings(new_crtc_state); if (INTEL_GEN(dev_priv) >= 11) - icl_enable_trans_port_sync(pipe_config); + icl_enable_trans_port_sync(new_crtc_state); - intel_set_pipe_src_size(pipe_config); + intel_set_pipe_src_size(new_crtc_state); if (cpu_transcoder != TRANSCODER_EDP && - !transcoder_is_dsi(cpu_transcoder)) { + !transcoder_is_dsi(cpu_transcoder)) I915_WRITE(PIPE_MULT(cpu_transcoder), - pipe_config->pixel_multiplier - 1); - } + new_crtc_state->pixel_multiplier - 1); - if (pipe_config->has_pch_encoder) { - intel_cpu_transcoder_set_m_n(pipe_config, - &pipe_config->fdi_m_n, NULL); - } + if (new_crtc_state->has_pch_encoder) + intel_cpu_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->fdi_m_n, NULL); - if (!transcoder_is_dsi(cpu_transcoder)) - haswell_set_pipeconf(pipe_config); + if (!transcoder_is_dsi(cpu_transcoder)) { + hsw_set_frame_start_delay(new_crtc_state); + haswell_set_pipeconf(new_crtc_state); + } if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipemisc(pipe_config); + bdw_set_pipemisc(new_crtc_state); - intel_crtc->active = true; + crtc->active = true; /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - pipe_config->pch_pfit.enabled; + new_crtc_state->pch_pfit.enabled; if (psl_clkgate_wa) 
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); if (INTEL_GEN(dev_priv) >= 9) - skylake_pfit_enable(pipe_config); + skylake_pfit_enable(new_crtc_state); else - ironlake_pfit_enable(pipe_config); + ironlake_pfit_enable(new_crtc_state); /* * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_color_load_luts(pipe_config); - intel_color_commit(pipe_config); + intel_color_load_luts(new_crtc_state); + intel_color_commit(new_crtc_state); /* update DSPCNTR to configure gamma/csc for pipe bottom color */ if (INTEL_GEN(dev_priv) < 9) - intel_disable_primary_plane(pipe_config); + intel_disable_primary_plane(new_crtc_state); if (INTEL_GEN(dev_priv) >= 11) - icl_set_pipe_chicken(intel_crtc); + icl_set_pipe_chicken(crtc); if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_enable_transcoder_func(pipe_config); + intel_ddi_enable_transcoder_func(new_crtc_state); - if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(state, pipe_config); + if (dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(state, crtc); if (INTEL_GEN(dev_priv) >= 11) - icl_pipe_mbus_enable(intel_crtc); + icl_pipe_mbus_enable(crtc); /* XXX: Do the pipe assertions at the right place for BXT DSI. */ if (!transcoder_is_dsi(cpu_transcoder)) - intel_enable_pipe(pipe_config); - - if (pipe_config->has_pch_encoder) - lpt_pch_enable(state, pipe_config); + intel_enable_pipe(new_crtc_state); - if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) - intel_ddi_set_vc_payload_alloc(pipe_config, true); + if (new_crtc_state->has_pch_encoder) + lpt_pch_enable(state, new_crtc_state); - assert_vblank_disabled(crtc); - intel_crtc_vblank_on(pipe_config); + intel_crtc_vblank_on(new_crtc_state); - intel_encoders_enable(intel_crtc, pipe_config, state); + intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { intel_wait_for_vblank(dev_priv, pipe); @@ -6557,16 +6888,16 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. 
*/ - hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; + hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); } } -static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) +void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -6579,14 +6910,13 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) } } -static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *state) +static void ironlake_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_crtc *crtc = old_crtc_state->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; /* * Sometimes spurious CPU pipe underruns happen when the @@ -6596,10 +6926,9 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - intel_encoders_disable(intel_crtc, old_crtc_state, state); + intel_encoders_disable(state, crtc); - drm_crtc_vblank_off(crtc); - assert_vblank_disabled(crtc); + intel_crtc_vblank_off(old_crtc_state); intel_disable_pipe(old_crtc_state); @@ -6608,7 +6937,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, if (old_crtc_state->has_pch_encoder) ironlake_fdi_disable(crtc); - intel_encoders_post_disable(intel_crtc, old_crtc_state, state); + intel_encoders_post_disable(state, crtc); if (old_crtc_state->has_pch_encoder) { ironlake_disable_pch_transcoder(dev_priv, pipe); @@ -6631,54 +6960,27 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, I915_WRITE(PCH_DPLL_SEL, temp); } - ironlake_fdi_pll_disable(intel_crtc); + ironlake_fdi_pll_disable(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } -static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *state) +static void haswell_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_crtc *crtc = old_crtc_state->base.crtc; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - - intel_encoders_disable(intel_crtc, old_crtc_state, state); - - drm_crtc_vblank_off(crtc); - assert_vblank_disabled(crtc); - - /* XXX: Do the pipe assertions at the right place for BXT DSI. 
*/ - if (!transcoder_is_dsi(cpu_transcoder)) - intel_disable_pipe(old_crtc_state); - - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) - intel_ddi_set_vc_payload_alloc(old_crtc_state, false); - - if (INTEL_GEN(dev_priv) >= 11) - icl_disable_transcoder_port_sync(old_crtc_state); - - if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_disable_transcoder_func(old_crtc_state); - - intel_dsc_disable(old_crtc_state); - - if (INTEL_GEN(dev_priv) >= 9) - skylake_scaler_disable(intel_crtc); - else - ironlake_pfit_disable(old_crtc_state); - - intel_encoders_post_disable(intel_crtc, old_crtc_state, state); - - intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); + /* + * FIXME collapse everything to one hook. + * Need care with mst->ddi interactions. + */ + intel_encoders_disable(state, crtc); + intel_encoders_post_disable(state, crtc); } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!crtc_state->gmch_pfit.control) @@ -6689,7 +6991,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) * according to register description and PRM. */ WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); @@ -6814,14 +7116,14 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_encoder *encoder; enum pipe pipe = crtc->pipe; u64 mask; enum transcoder transcoder = crtc_state->cpu_transcoder; - if (!crtc_state->base.active) + if (!crtc_state->hw.active) return 0; mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); @@ -6831,7 +7133,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); drm_for_each_encoder_mask(encoder, &dev_priv->drm, - crtc_state->base.encoder_mask) { + crtc_state->uapi.encoder_mask) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); mask |= BIT_ULL(intel_encoder->power_domain); @@ -6849,7 +7151,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) static u64 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; u64 domains, new_domains, old_domains; @@ -6875,146 +7177,140 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, intel_display_power_put_unchecked(dev_priv, domain); } -static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, - struct intel_atomic_state *state) +static void valleyview_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; - if (WARN_ON(intel_crtc->active)) + if (WARN_ON(crtc->active)) return; - if (intel_crtc_has_dp_encoder(pipe_config)) - intel_dp_set_m_n(pipe_config, M1_N1); + if (intel_crtc_has_dp_encoder(new_crtc_state)) + intel_dp_set_m_n(new_crtc_state, M1_N1); - intel_set_pipe_timings(pipe_config); - intel_set_pipe_src_size(pipe_config); + intel_set_pipe_timings(new_crtc_state); + intel_set_pipe_src_size(new_crtc_state); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); I915_WRITE(CHV_CANVAS(pipe), 0); } - i9xx_set_pipeconf(pipe_config); + i9xx_set_pipeconf(new_crtc_state); - intel_crtc->active = true; + crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); + intel_encoders_pre_pll_enable(state, crtc); if (IS_CHERRYVIEW(dev_priv)) { - chv_prepare_pll(intel_crtc, pipe_config); - chv_enable_pll(intel_crtc, pipe_config); + chv_prepare_pll(crtc, new_crtc_state); + chv_enable_pll(crtc, new_crtc_state); } else { - vlv_prepare_pll(intel_crtc, pipe_config); - vlv_enable_pll(intel_crtc, pipe_config); + vlv_prepare_pll(crtc, new_crtc_state); + vlv_enable_pll(crtc, new_crtc_state); } - intel_encoders_pre_enable(intel_crtc, pipe_config, state); + intel_encoders_pre_enable(state, crtc); - i9xx_pfit_enable(pipe_config); + i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(pipe_config); - intel_color_commit(pipe_config); + intel_color_load_luts(new_crtc_state); + intel_color_commit(new_crtc_state); /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(pipe_config); + intel_disable_primary_plane(new_crtc_state); - dev_priv->display.initial_watermarks(state, pipe_config); - intel_enable_pipe(pipe_config); + dev_priv->display.initial_watermarks(state, crtc); + intel_enable_pipe(new_crtc_state); - assert_vblank_disabled(crtc); - intel_crtc_vblank_on(pipe_config); + intel_crtc_vblank_on(new_crtc_state); - intel_encoders_enable(intel_crtc, pipe_config, state); + intel_encoders_enable(state, crtc); } static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); } -static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, - struct intel_atomic_state *state) +static void i9xx_crtc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; - if (WARN_ON(intel_crtc->active)) + if (WARN_ON(crtc->active)) return; - i9xx_set_pll_dividers(pipe_config); + i9xx_set_pll_dividers(new_crtc_state); - if (intel_crtc_has_dp_encoder(pipe_config)) - 
intel_dp_set_m_n(pipe_config, M1_N1); + if (intel_crtc_has_dp_encoder(new_crtc_state)) + intel_dp_set_m_n(new_crtc_state, M1_N1); - intel_set_pipe_timings(pipe_config); - intel_set_pipe_src_size(pipe_config); + intel_set_pipe_timings(new_crtc_state); + intel_set_pipe_src_size(new_crtc_state); - i9xx_set_pipeconf(pipe_config); + i9xx_set_pipeconf(new_crtc_state); - intel_crtc->active = true; + crtc->active = true; if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_encoders_pre_enable(intel_crtc, pipe_config, state); + intel_encoders_pre_enable(state, crtc); - i9xx_enable_pll(intel_crtc, pipe_config); + i9xx_enable_pll(crtc, new_crtc_state); - i9xx_pfit_enable(pipe_config); + i9xx_pfit_enable(new_crtc_state); - intel_color_load_luts(pipe_config); - intel_color_commit(pipe_config); + intel_color_load_luts(new_crtc_state); + intel_color_commit(new_crtc_state); /* update DSPCNTR to configure gamma for pipe bottom color */ - intel_disable_primary_plane(pipe_config); + intel_disable_primary_plane(new_crtc_state); - if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(state, - pipe_config); + if (dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(state, crtc); else - intel_update_watermarks(intel_crtc); - intel_enable_pipe(pipe_config); + intel_update_watermarks(crtc); + intel_enable_pipe(new_crtc_state); - assert_vblank_disabled(crtc); - intel_crtc_vblank_on(pipe_config); + intel_crtc_vblank_on(new_crtc_state); - intel_encoders_enable(intel_crtc, pipe_config, state); + intel_encoders_enable(state, crtc); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!old_crtc_state->gmch_pfit.control) return; - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", I915_READ(PFIT_CONTROL)); I915_WRITE(PFIT_CONTROL, 0); } -static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *state) +static void i9xx_crtc_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_crtc *crtc = old_crtc_state->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; /* * On gen2 planes are double buffered but the pipe isn't, so we must @@ -7023,16 +7319,15 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, if (IS_GEN(dev_priv, 2)) intel_wait_for_vblank(dev_priv, pipe); - intel_encoders_disable(intel_crtc, old_crtc_state, state); + intel_encoders_disable(state, crtc); - drm_crtc_vblank_off(crtc); - assert_vblank_disabled(crtc); + intel_crtc_vblank_off(old_crtc_state); intel_disable_pipe(old_crtc_state); i9xx_pfit_disable(old_crtc_state); - intel_encoders_post_disable(intel_crtc, old_crtc_state, state); + intel_encoders_post_disable(state, crtc); if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev_priv)) @@ -7043,92 +7338,97 @@ static void 
i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, i9xx_disable_pll(old_crtc_state); } - intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); + intel_encoders_post_pll_disable(state, crtc); if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); if (!dev_priv->display.initial_watermarks) - intel_update_watermarks(intel_crtc); + intel_update_watermarks(crtc); /* clock the pipe down to 640x480@60 to potentially save power */ if (IS_I830(dev_priv)) i830_enable_pipe(dev_priv, pipe); } -static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, +static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *encoder; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_bw_state *bw_state = to_intel_bw_state(dev_priv->bw_obj.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); enum intel_display_power_domain domain; struct intel_plane *plane; - u64 domains; struct drm_atomic_state *state; - struct intel_crtc_state *crtc_state; + struct intel_crtc_state *temp_crtc_state; + enum pipe pipe = crtc->pipe; + u64 domains; int ret; - if (!intel_crtc->active) + if (!crtc_state->hw.active) return; - for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - if (plane_state->base.visible) - intel_plane_disable_noatomic(intel_crtc, plane); + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); } - state = drm_atomic_state_alloc(crtc->dev); + state = drm_atomic_state_alloc(&dev_priv->drm); if (!state) { DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", - crtc->base.id, crtc->name); + crtc->base.base.id, crtc->base.name); return; } state->acquire_ctx = ctx; /* Everything's already locked, -EDEADLK can't happen. 
*/ - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); - ret = drm_atomic_add_affected_connectors(state, crtc); + temp_crtc_state = intel_atomic_get_crtc_state(state, crtc); + ret = drm_atomic_add_affected_connectors(state, &crtc->base); - WARN_ON(IS_ERR(crtc_state) || ret); + WARN_ON(IS_ERR(temp_crtc_state) || ret); - dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state)); + dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc); drm_atomic_state_put(state); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", - crtc->base.id, crtc->name); + crtc->base.base.id, crtc->base.name); + + crtc->active = false; + crtc->base.enabled = false; - WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); - crtc->state->active = false; - intel_crtc->active = false; - crtc->enabled = false; - crtc->state->connector_mask = 0; - crtc->state->encoder_mask = 0; + WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); + crtc_state->uapi.active = false; + crtc_state->uapi.connector_mask = 0; + crtc_state->uapi.encoder_mask = 0; + intel_crtc_free_hw_state(crtc_state); + memset(&crtc_state->hw, 0, sizeof(crtc_state->hw)); - for_each_encoder_on_crtc(crtc->dev, crtc, encoder) + for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder) encoder->base.crtc = NULL; - intel_fbc_disable(intel_crtc); - intel_update_watermarks(intel_crtc); - intel_disable_shared_dpll(to_intel_crtc_state(crtc->state)); + intel_fbc_disable(crtc); + intel_update_watermarks(crtc); + intel_disable_shared_dpll(crtc_state); - domains = intel_crtc->enabled_power_domains; + domains = crtc->enabled_power_domains; for_each_power_domain(domain, domains) intel_display_power_put_unchecked(dev_priv, domain); - intel_crtc->enabled_power_domains = 0; + crtc->enabled_power_domains = 0; - dev_priv->active_pipes &= ~BIT(intel_crtc->pipe); - dev_priv->min_cdclk[intel_crtc->pipe] = 0; - dev_priv->min_voltage_level[intel_crtc->pipe] = 0; + dev_priv->active_pipes &= ~BIT(pipe); + dev_priv->min_cdclk[pipe] = 0; + dev_priv->min_voltage_level[pipe] = 0; - bw_state->data_rate[intel_crtc->pipe] = 0; - bw_state->num_active_planes[intel_crtc->pipe] = 0; + bw_state->data_rate[pipe] = 0; + bw_state->num_active_planes[pipe] = 0; } /* @@ -7178,8 +7478,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, if (!crtc_state) return; - I915_STATE_WARN(!crtc_state->base.active, - "connector is active, but attached crtc isn't\n"); + I915_STATE_WARN(!crtc_state->hw.active, + "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) return; @@ -7190,8 +7490,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, "attached encoder crtc differs from connector crtc\n"); } else { - I915_STATE_WARN(crtc_state && crtc_state->base.active, - "attached crtc is active, but connector isn't\n"); + I915_STATE_WARN(crtc_state && crtc_state->hw.active, + "attached crtc is active, but connector isn't\n"); I915_STATE_WARN(!crtc_state && conn_state->best_encoder, "best encoder set without crtc!\n"); } @@ -7199,7 +7499,7 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) { - if (crtc_state->base.enable && crtc_state->has_pch_encoder) + if (crtc_state->hw.enable && crtc_state->has_pch_encoder) return crtc_state->fdi_lanes; return 0; @@ -7209,7 +7509,7 @@ 
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *state = pipe_config->base.state; + struct drm_atomic_state *state = pipe_config->uapi.state; struct intel_crtc *other_crtc; struct intel_crtc_state *other_crtc_state; @@ -7282,7 +7582,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = intel_crtc->base.dev; - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int lane, link_bw, fdi_dotclock, ret; bool needs_recompute = false; @@ -7328,7 +7628,7 @@ retry: bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* IPS only exists on ULT machines and is tied to pipe A. */ @@ -7358,9 +7658,9 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = - to_i915(crtc_state->base.crtc->dev); + to_i915(crtc_state->uapi.crtc->dev); struct intel_atomic_state *intel_state = - to_intel_atomic_state(crtc_state->base.state); + to_intel_atomic_state(crtc_state->uapi.state); if (!hsw_crtc_state_ips_capable(crtc_state)) return false; @@ -7399,7 +7699,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) { u32 pixel_rate; - pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; + pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock; /* * We only use IF-ID interlacing. If we ever use @@ -7432,12 +7732,12 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (HAS_GMCH(dev_priv)) /* FIXME calculate proper pipe pixel rate for GMCH pfit */ crtc_state->pixel_rate = - crtc_state->base.adjusted_mode.crtc_clock; + crtc_state->hw.adjusted_mode.crtc_clock; else crtc_state->pixel_rate = ilk_pipe_pixel_rate(crtc_state); @@ -7447,7 +7747,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int clock_limit = dev_priv->max_dotclk_freq; if (INTEL_GEN(dev_priv) < 4) { @@ -7473,7 +7773,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && - pipe_config->base.ctm) { + pipe_config->hw.ctm) { /* * There is only one pipe CSC unit per pipe, and we need that * for output conversion from RGB->YCBCR. 
So if CTM is already @@ -7667,7 +7967,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -7694,7 +7994,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta const struct intel_link_m_n *m_n, const struct intel_link_m_n *m2_n2) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder transcoder = crtc_state->cpu_transcoder; @@ -8003,11 +8303,11 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); struct intel_crtc_state *pipe_config; - pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); + pipe_config = intel_crtc_state_alloc(crtc); if (!pipe_config) return -ENOMEM; - pipe_config->base.crtc = &crtc->base; + pipe_config->cpu_transcoder = (enum transcoder)pipe; pipe_config->pixel_multiplier = 1; pipe_config->dpll = *dpll; @@ -8167,11 +8467,11 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 crtc_vtotal, crtc_vblank_end; int vsyncshift = 0; @@ -8229,7 +8529,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -8243,7 +8543,7 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (IS_GEN(dev_priv, 2)) @@ -8265,39 +8565,39 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, u32 tmp; tmp = I915_READ(HTOTAL(cpu_transcoder)); - pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; - pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; if (!transcoder_is_dsi(cpu_transcoder)) { tmp = I915_READ(HBLANK(cpu_transcoder)); - pipe_config->base.adjusted_mode.crtc_hblank_start = + pipe_config->hw.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; - 
pipe_config->base.adjusted_mode.crtc_hblank_end = + pipe_config->hw.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; } tmp = I915_READ(HSYNC(cpu_transcoder)); - pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; - pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; tmp = I915_READ(VTOTAL(cpu_transcoder)); - pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; - pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; if (!transcoder_is_dsi(cpu_transcoder)) { tmp = I915_READ(VBLANK(cpu_transcoder)); - pipe_config->base.adjusted_mode.crtc_vblank_start = + pipe_config->hw.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; - pipe_config->base.adjusted_mode.crtc_vblank_end = + pipe_config->hw.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; } tmp = I915_READ(VSYNC(cpu_transcoder)); - pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; - pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; + pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; if (intel_pipe_is_interlaced(pipe_config)) { - pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; - pipe_config->base.adjusted_mode.crtc_vtotal += 1; - pipe_config->base.adjusted_mode.crtc_vblank_end += 1; + pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; + pipe_config->hw.adjusted_mode.crtc_vtotal += 1; + pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; } } @@ -8312,27 +8612,27 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, pipe_config->pipe_src_h = (tmp & 0xffff) + 1; pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; - pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; - pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; + pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h; + pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w; } void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config) { - mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; - mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; - mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; - mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; + mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay; + mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal; + mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start; + mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end; - mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; - mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; - mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; - mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; + mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay; + mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal; + mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start; + mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end; - mode->flags = pipe_config->base.adjusted_mode.flags; + mode->flags = 
pipe_config->hw.adjusted_mode.flags; mode->type = DRM_MODE_TYPE_DRIVER; - mode->clock = pipe_config->base.adjusted_mode.crtc_clock; + mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; mode->hsync = drm_mode_hsync(mode); mode->vrefresh = drm_mode_vrefresh(mode); @@ -8341,7 +8641,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 pipeconf; @@ -8378,7 +8678,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) } } - if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (INTEL_GEN(dev_priv) < 4 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; @@ -8394,6 +8694,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + pipeconf |= PIPECONF_FRAME_START_DELAY(0); + I915_WRITE(PIPECONF(crtc->pipe), pipeconf); POSTING_READ(PIPECONF(crtc->pipe)); } @@ -8787,7 +9089,7 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc) static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; @@ -8911,7 +9213,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, * but in case the pipe is enabled w/o any ports we need a sane * default. */ - pipe_config->base.adjusted_mode.crtc_clock = + pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock / pipe_config->pixel_multiplier; ret = true; @@ -9359,7 +9661,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; - bool pch_ssc_in_use = false; bool has_fdi = false; for_each_intel_encoder(&dev_priv->drm, encoder) { @@ -9387,22 +9688,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) * clock hierarchy. That would also allow us to do * clock bending finally. 
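The next hunk replaces the function-local `pch_ssc_in_use` bool with `dev_priv->pch_ssc_use`, a bitmask keyed by DPLL ID, so later PLL code can tell which consumer still depends on the PCH SSC reference instead of only knowing that someone does. A standalone sketch of the pattern (the enum values here are illustrative, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative IDs; the real ones live in the i915 DPLL code. */
enum dpll_id { DPLL_ID_SPLL, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2 };

int main(void)
{
	uint32_t pch_ssc_use = 0;

	/* each user sets its own bit instead of a shared flag */
	pch_ssc_use |= BIT(DPLL_ID_SPLL);
	pch_ssc_use |= BIT(DPLL_ID_WRPLL1);

	/* "anyone at all?" is still a plain nonzero test */
	if (pch_ssc_use)
		printf("PCH SSC in use, mask=0x%x\n", (unsigned)pch_ssc_use);

	/* and one consumer can be dropped without losing the others */
	pch_ssc_use &= ~BIT(DPLL_ID_SPLL);
	printf("after SPLL release: 0x%x\n", (unsigned)pch_ssc_use);
	return 0;
}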
*/ + dev_priv->pch_ssc_use = 0; + if (spll_uses_pch_ssc(dev_priv)) { DRM_DEBUG_KMS("SPLL using PCH SSC\n"); - pch_ssc_in_use = true; + dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); } if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); - pch_ssc_in_use = true; + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); } if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); - pch_ssc_in_use = true; + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); } - if (pch_ssc_in_use) + if (dev_priv->pch_ssc_use) return; if (has_fdi) { @@ -9426,7 +9729,7 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv) static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 val; @@ -9454,7 +9757,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= PIPECONF_INTERLACED_ILK; else val |= PIPECONF_PROGRESSIVE; @@ -9474,13 +9777,15 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + val |= PIPECONF_FRAME_START_DELAY(0); + I915_WRITE(PIPECONF(pipe), val); POSTING_READ(PIPECONF(pipe)); } static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; @@ -9488,7 +9793,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) if (IS_HASWELL(dev_priv) && crtc_state->dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= PIPECONF_INTERLACED_ILK; else val |= PIPECONF_PROGRESSIVE; @@ -9503,7 +9808,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val = 0; @@ -9689,7 +9994,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->base.state); + to_intel_atomic_state(crtc_state->uapi.state); const struct intel_limit *limit; int refclk = 120000; @@ -9902,7 +10207,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, case PLANE_CTL_TILED_Y: plane_config->tiling = I915_TILING_Y; if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) - fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; + fb->modifier = INTEL_GEN(dev_priv) >= 12 ? 
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : + I915_FORMAT_MOD_Y_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; @@ -10115,7 +10422,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->base.state); + to_intel_atomic_state(crtc_state->uapi.state); if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || INTEL_GEN(dev_priv) >= 11) { @@ -10328,6 +10635,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, case TRANS_DDI_EDP_INPUT_C_ONOFF: trans_pipe = PIPE_C; break; + case TRANS_DDI_EDP_INPUT_D_ONOFF: + trans_pipe = PIPE_D; + break; } if (trans_pipe == crtc->pipe) { @@ -10416,16 +10726,21 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_shared_dpll *pll; enum port port; u32 tmp; - tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); - - if (INTEL_GEN(dev_priv) >= 12) - port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); - else - port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); + if (transcoder_is_dsi(cpu_transcoder)) { + port = (cpu_transcoder == TRANSCODER_DSI_A) ? + PORT_A : PORT_B; + } else { + tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (INTEL_GEN(dev_priv) >= 12) + port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); + else + port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); + } if (INTEL_GEN(dev_priv) >= 11) icelake_get_ddi_pll(dev_priv, port, pipe_config); @@ -10481,7 +10796,7 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 transcoders; enum transcoder cpu_transcoder; @@ -10523,8 +10838,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, u64 power_domain_mask; bool active; - intel_crtc_init_scalers(crtc, pipe_config); - pipe_config->master_transcoder = INVALID_TRANSCODER; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); @@ -10650,8 +10963,8 @@ out: static u32 intel_cursor_base(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 base; @@ -10665,8 +10978,8 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) static u32 intel_cursor_position(const struct intel_plane_state *plane_state) { - int x = plane_state->base.dst.x1; - int y = plane_state->base.dst.y1; + int x = plane_state->uapi.dst.x1; + int y = plane_state->uapi.dst.y1; u32 pos = 0; if (x < 0) { @@ -10687,9 +11000,9 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state) static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) { const struct drm_mode_config *config = - &plane_state->base.plane->dev->mode_config; - int width = drm_rect_width(&plane_state->base.dst); - int height = drm_rect_height(&plane_state->base.dst); + &plane_state->uapi.plane->dev->mode_config; + int width = drm_rect_width(&plane_state->uapi.dst); + int 
height = drm_rect_height(&plane_state->uapi.dst); return width > 0 && width <= config->cursor_width && height > 0 && height <= config->cursor_height; @@ -10698,8 +11011,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) static int intel_cursor_check_surface(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - unsigned int rotation = plane_state->base.rotation; + to_i915(plane_state->uapi.plane->dev); + unsigned int rotation = plane_state->hw.rotation; int src_x, src_y; u32 offset; int ret; @@ -10708,11 +11021,11 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; - src_x = plane_state->base.src.x1 >> 16; - src_y = plane_state->base.src.y1 >> 16; + src_x = plane_state->uapi.src.x1 >> 16; + src_y = plane_state->uapi.src.y1 >> 16; intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&src_x, &src_y, @@ -10727,14 +11040,14 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) * Put the final coordinates back so that the src * coordinate checks will see the right values. */ - drm_rect_translate_to(&plane_state->base.src, + drm_rect_translate_to(&plane_state->uapi.src, src_x << 16, src_y << 16); /* ILK+ do this automagically in hardware */ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { - const struct drm_framebuffer *fb = plane_state->base.fb; - int src_w = drm_rect_width(&plane_state->base.src) >> 16; - int src_h = drm_rect_height(&plane_state->base.src) >> 16; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; offset += (src_h * src_w - 1) * fb->format->cpp[0]; } @@ -10749,7 +11062,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) static int intel_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { @@ -10757,8 +11070,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, return -EINVAL; } - ret = drm_atomic_helper_check_plane_state(&plane_state->base, - &crtc_state->base, + ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, + &crtc_state->uapi, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true); @@ -10766,14 +11079,14 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, return ret; /* Use the unclipped src/dst rectangles, which we program to hw */ - plane_state->base.src = drm_plane_state_src(&plane_state->base); - plane_state->base.dst = drm_plane_state_dest(&plane_state->base); + plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi); + plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi); ret = intel_cursor_check_surface(plane_state); if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); @@ -10811,7 +11124,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) { - int width = drm_rect_width(&plane_state->base.dst); + int width = 
drm_rect_width(&plane_state->uapi.dst); /* * 845g/865g are only limited by the width of their cursors, @@ -10823,7 +11136,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) static int i845_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; ret = intel_check_cursor(crtc_state, plane_state); @@ -10837,12 +11150,12 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i845_cursor_size_ok(plane_state)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", - drm_rect_width(&plane_state->base.dst), - drm_rect_height(&plane_state->base.dst)); + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst)); return -EINVAL; } - WARN_ON(plane_state->base.visible && + WARN_ON(plane_state->uapi.visible && plane_state->color_plane[0].stride != fb->pitches[0]); switch (fb->pitches[0]) { @@ -10870,9 +11183,9 @@ static void i845_update_cursor(struct intel_plane *plane, u32 cntl = 0, base = 0, pos = 0, size = 0; unsigned long irqflags; - if (plane_state && plane_state->base.visible) { - unsigned int width = drm_rect_width(&plane_state->base.src); - unsigned int height = drm_rect_height(&plane_state->base.dst); + if (plane_state && plane_state->uapi.visible) { + unsigned int width = drm_rect_width(&plane_state->uapi.dst); + unsigned int height = drm_rect_height(&plane_state->uapi.dst); cntl = plane_state->ctl | i845_cursor_ctl_crtc(crtc_state); @@ -10945,7 +11258,7 @@ i9xx_cursor_max_stride(struct intel_plane *plane, static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 cntl = 0; @@ -10968,13 +11281,13 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); + to_i915(plane_state->uapi.plane->dev); u32 cntl = 0; if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) cntl |= MCURSOR_TRICKLE_FEED_DISABLE; - switch (drm_rect_width(&plane_state->base.dst)) { + switch (drm_rect_width(&plane_state->uapi.dst)) { case 64: cntl |= MCURSOR_MODE_64_ARGB_AX; break; @@ -10985,11 +11298,11 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, cntl |= MCURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(drm_rect_width(&plane_state->base.dst)); + MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); return 0; } - if (plane_state->base.rotation & DRM_MODE_ROTATE_180) + if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) cntl |= MCURSOR_ROTATE_180; return cntl; @@ -10998,9 +11311,9 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - int width = drm_rect_width(&plane_state->base.dst); - int height = drm_rect_height(&plane_state->base.dst); + to_i915(plane_state->uapi.plane->dev); + int width = drm_rect_width(&plane_state->uapi.dst); + int height = drm_rect_height(&plane_state->uapi.dst); if (!intel_cursor_size_ok(plane_state)) return false; @@ -11022,7 +11335,7 @@ static bool i9xx_cursor_size_ok(const 
struct intel_plane_state *plane_state) * cursors. */ if (HAS_CUR_FBC(dev_priv) && - plane_state->base.rotation & DRM_MODE_ROTATE_0) { + plane_state->hw.rotation & DRM_MODE_ROTATE_0) { if (height < 8 || height > width) return false; } else { @@ -11036,9 +11349,9 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; int ret; @@ -11053,19 +11366,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i9xx_cursor_size_ok(plane_state)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", - drm_rect_width(&plane_state->base.dst), - drm_rect_height(&plane_state->base.dst)); + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst)); return -EINVAL; } - WARN_ON(plane_state->base.visible && + WARN_ON(plane_state->uapi.visible && plane_state->color_plane[0].stride != fb->pitches[0]); if (fb->pitches[0] != - drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) { + drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", fb->pitches[0], - drm_rect_width(&plane_state->base.dst)); + drm_rect_width(&plane_state->uapi.dst)); return -EINVAL; } @@ -11080,7 +11393,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, * Refuse the put the cursor into that compromised position. */ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && - plane_state->base.visible && plane_state->base.dst.x1 < 0) { + plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } @@ -11099,9 +11412,9 @@ static void i9xx_update_cursor(struct intel_plane *plane, u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; unsigned long irqflags; - if (plane_state && plane_state->base.visible) { - unsigned width = drm_rect_width(&plane_state->base.dst); - unsigned height = drm_rect_height(&plane_state->base.dst); + if (plane_state && plane_state->uapi.visible) { + unsigned width = drm_rect_width(&plane_state->uapi.dst); + unsigned height = drm_rect_height(&plane_state->uapi.dst); cntl = plane_state->ctl | i9xx_cursor_ctl_crtc(crtc_state); @@ -11252,7 +11565,6 @@ static int intel_modeset_disable_planes(struct drm_atomic_state *state, } int intel_get_load_detect_pipe(struct drm_connector *connector, - const struct drm_display_mode *mode, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx) { @@ -11357,12 +11669,10 @@ found: goto fail; } - crtc_state->base.active = crtc_state->base.enable = true; - - if (!mode) - mode = &load_detect_mode; + crtc_state->uapi.active = true; - ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); + ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi, + &load_detect_mode); if (ret) goto fail; @@ -11570,11 +11880,37 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, * we may need some idea for the dotclock anyway. * Calculate one based on the FDI configuration. 
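The dotclock estimate mentioned in this comment comes from intel_dotclock_calculate(), used just below: pixel clock = link_freq * M / N, with the product widened to 64 bits so it cannot overflow. A userspace sketch of that arithmetic (struct and field names follow intel_link_m_n; the rest is simplified):

#include <stdint.h>
#include <stdio.h>

struct link_m_n {
	uint32_t link_m;
	uint32_t link_n;
};

/* dotclock = link_freq * M / N; 22222 * 270000 already exceeds
 * 32 bits, hence the 64-bit intermediate. */
static uint32_t dotclock_calculate(uint32_t link_freq,
				   const struct link_m_n *m_n)
{
	if (!m_n->link_n)
		return 0;

	return (uint32_t)((uint64_t)m_n->link_m * link_freq / m_n->link_n);
}

int main(void)
{
	/* e.g. a 270000 kHz link with an M/N ratio of 22222/32768 */
	struct link_m_n m_n = { .link_m = 22222, .link_n = 32768 };

	printf("dotclock = %u kHz\n",
	       (unsigned)dotclock_calculate(270000, &m_n));
	return 0;
}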
*/ - pipe_config->base.adjusted_mode.crtc_clock = + pipe_config->hw.adjusted_mode.crtc_clock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), &pipe_config->fdi_m_n); } +static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, + struct intel_crtc *crtc) +{ + memset(crtc_state, 0, sizeof(*crtc_state)); + + __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base); + + crtc_state->cpu_transcoder = INVALID_TRANSCODER; + crtc_state->master_transcoder = INVALID_TRANSCODER; + crtc_state->hsw_workaround_pipe = INVALID_PIPE; + crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; + crtc_state->scaler_state.scaler_id = -1; +} + +static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state; + + crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL); + + if (crtc_state) + intel_crtc_state_reset(crtc_state, crtc); + + return crtc_state; +} + /* Returns the currently programmed mode of the given encoder. */ struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) @@ -11594,14 +11930,12 @@ intel_encoder_current_mode(struct intel_encoder *encoder) if (!mode) return NULL; - crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); + crtc_state = intel_crtc_state_alloc(crtc); if (!crtc_state) { kfree(mode); return NULL; } - crtc_state->base.crtc = &crtc->base; - if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { kfree(crtc_state); kfree(mode); @@ -11639,18 +11973,18 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur, struct intel_plane_state *new) { /* Update watermarks on tiling or size changes. */ - if (new->base.visible != cur->base.visible) + if (new->uapi.visible != cur->uapi.visible) return true; - if (!cur->base.fb || !new->base.fb) + if (!cur->hw.fb || !new->hw.fb) return false; - if (cur->base.fb->modifier != new->base.fb->modifier || - cur->base.rotation != new->base.rotation || - drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || - drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || - drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || - drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) + if (cur->hw.fb->modifier != new->hw.fb->modifier || + cur->hw.rotation != new->hw.rotation || + drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || + drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || + drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || + drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) return true; return false; @@ -11658,10 +11992,10 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur, static bool needs_scaling(const struct intel_plane_state *state) { - int src_w = drm_rect_width(&state->base.src) >> 16; - int src_h = drm_rect_height(&state->base.src) >> 16; - int dst_w = drm_rect_width(&state->base.dst); - int dst_h = drm_rect_height(&state->base.dst); + int src_w = drm_rect_width(&state->uapi.src) >> 16; + int src_h = drm_rect_height(&state->uapi.src) >> 16; + int dst_w = drm_rect_width(&state->uapi.dst); + int dst_h = drm_rect_height(&state->uapi.dst); return (src_w != dst_w || src_h != dst_h); } @@ -11671,12 +12005,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat const struct intel_plane_state *old_plane_state, struct intel_plane_state *plane_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct 
intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); bool mode_changed = needs_modeset(crtc_state); - bool was_crtc_enabled = old_crtc_state->base.active; - bool is_crtc_enabled = crtc_state->base.active; + bool was_crtc_enabled = old_crtc_state->hw.active; + bool is_crtc_enabled = crtc_state->hw.active; bool turn_off, turn_on, visible, was_visible; int ret; @@ -11686,8 +12020,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat return ret; } - was_visible = old_plane_state->base.visible; - visible = plane_state->base.visible; + was_visible = old_plane_state->uapi.visible; + visible = plane_state->uapi.visible; if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; @@ -11703,9 +12037,10 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) { - plane_state->base.visible = visible = false; + plane_state->uapi.visible = visible = false; crtc_state->active_planes &= ~BIT(plane->id); crtc_state->data_rate[plane->id] = 0; + crtc_state->min_cdclk[plane->id] = 0; } if (!was_visible && !visible) @@ -11843,9 +12178,9 @@ static int icl_add_linked_planes(struct intel_atomic_state *state) static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct intel_plane *plane, *linked; struct intel_plane_state *plane_state; int i; @@ -11862,7 +12197,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) continue; plane_state->planar_linked_plane = NULL; - if (plane_state->planar_slave && !plane_state->base.visible) { + if (plane_state->planar_slave && !plane_state->uapi.visible) { crtc_state->active_planes &= ~BIT(plane->id); crtc_state->update_planes |= BIT(plane->id); } @@ -11908,6 +12243,25 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) crtc_state->active_planes |= BIT(linked->id); crtc_state->update_planes |= BIT(linked->id); DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); + + /* Copy parameters to slave plane */ + linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; + linked_state->color_ctl = plane_state->color_ctl; + memcpy(linked_state->color_plane, plane_state->color_plane, + sizeof(linked_state->color_plane)); + + intel_plane_copy_uapi_to_hw_state(linked_state, plane_state); + linked_state->uapi.src = plane_state->uapi.src; + linked_state->uapi.dst = plane_state->uapi.dst; + + if (icl_is_hdr_plane(dev_priv, plane->id)) { + if (linked->id == PLANE_SPRITE5) + plane_state->cus_ctl |= PLANE_CUS_PLANE_7; + else if (linked->id == PLANE_SPRITE4) + plane_state->cus_ctl |= PLANE_CUS_PLANE_6; + else + MISSING_CASE(linked->id); + } } return 0; @@ -11915,9 +12269,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 
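intel_plane_atomic_calc_changes() above keeps the per-crtc plane bookkeeping consistent when a plane turns off: the active_planes bitmask loses the plane's bit, and the per-plane data_rate[] and (new in this series) min_cdclk[] entries are zeroed so later aggregation sees no stale contribution. A standalone model of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define MAX_PLANES 8

/* Simplified per-crtc state: a bitmask of active planes plus
 * parallel arrays indexed by plane id, as in intel_crtc_state. */
struct crtc_state {
	uint8_t active_planes;
	unsigned int data_rate[MAX_PLANES];
	unsigned int min_cdclk[MAX_PLANES];
};

static void plane_disable(struct crtc_state *s, int plane_id)
{
	s->active_planes &= ~(uint8_t)BIT(plane_id);
	s->data_rate[plane_id] = 0;
	s->min_cdclk[plane_id] = 0;
}

int main(void)
{
	struct crtc_state s = {
		.active_planes = BIT(0) | BIT(1),
		.data_rate = { 100000, 50000 },
		.min_cdclk = { 300000, 150000 },
	};

	plane_disable(&s, 1);

	/* the hweight8() checks later in this diff are popcounts
	 * over exactly this kind of mask */
	printf("active=0x%x count=%d\n", s.active_planes,
	       __builtin_popcount(s.active_planes));
	return 0;
}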
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct intel_atomic_state *state = - to_intel_atomic_state(new_crtc_state->base.state); + to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -11926,9 +12280,9 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = crtc_state->base.crtc; - struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_crtc *crtc = crtc_state->uapi.crtc; + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct drm_connector *master_connector, *connector; struct drm_connector_state *connector_state; struct drm_connector_list_iter conn_iter; @@ -11952,8 +12306,8 @@ static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) continue; if (!connector->has_tile) continue; - if (crtc_state->base.mode.hdisplay != connector->tile_h_size || - crtc_state->base.mode.vdisplay != connector->tile_v_size) + if (crtc_state->hw.mode.hdisplay != connector->tile_h_size || + crtc_state->hw.mode.vdisplay != connector->tile_v_size) return 0; if (connector->tile_h_loc == connector->num_h_tile - 1 && connector->tile_v_loc == connector->num_v_tile - 1) @@ -12002,7 +12356,7 @@ static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) BIT(crtc_state->cpu_transcoder); DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", transcoder_name(crtc_state->master_transcoder), - crtc_state->base.crtc->base.id, + crtc_state->uapi.crtc->base.id, master_pipe_config->sync_mode_slaves_mask); } @@ -12019,10 +12373,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, int ret; if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && - mode_changed && !crtc_state->base.active) + mode_changed && !crtc_state->hw.active) crtc_state->update_wm_post = true; - if (mode_changed && crtc_state->base.enable && + if (mode_changed && crtc_state->hw.enable && dev_priv->display.crtc_compute_clock && !WARN_ON(crtc_state->shared_dpll)) { ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); @@ -12035,10 +12389,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, * when C8 planes are getting enabled/disabled. 
*/ if (c8_planes_changed(crtc_state)) - crtc_state->base.color_mgmt_changed = true; + crtc_state->uapi.color_mgmt_changed = true; if (mode_changed || crtc_state->update_pipe || - crtc_state->base.color_mgmt_changed) { + crtc_state->uapi.color_mgmt_changed) { ret = intel_color_check(crtc_state); if (ret) return ret; @@ -12072,11 +12426,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) >= 9) { if (mode_changed || crtc_state->update_pipe) ret = skl_update_scaler_crtc(crtc_state); - - if (!ret) - ret = icl_check_nv12_planes(crtc_state); - if (!ret) - ret = skl_check_pipe_max_pixel_rate(crtc, crtc_state); if (!ret) ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); @@ -12156,7 +12505,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct drm_atomic_state *state = pipe_config->base.state; + struct drm_atomic_state *state = pipe_config->uapi.state; struct drm_connector *connector; struct drm_connector_state *connector_state; int bpp, i; @@ -12281,14 +12630,14 @@ static const char *output_formats(enum intel_output_format format) static void intel_dump_plane_state(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); - const struct drm_framebuffer *fb = plane_state->base.fb; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_format_name_buf format_name; if (!fb) { DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", plane->base.base.id, plane->base.name, - yesno(plane_state->base.visible)); + yesno(plane_state->uapi.visible)); return; } @@ -12296,20 +12645,20 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) plane->base.base.id, plane->base.name, fb->base.id, fb->width, fb->height, drm_get_format_name(fb->format->format, &format_name), - yesno(plane_state->base.visible)); + yesno(plane_state->uapi.visible)); DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", - plane_state->base.rotation, plane_state->scaler_id); - if (plane_state->base.visible) + plane_state->hw.rotation, plane_state->scaler_id); + if (plane_state->uapi.visible) DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", - DRM_RECT_FP_ARG(&plane_state->base.src), - DRM_RECT_ARG(&plane_state->base.dst)); + DRM_RECT_FP_ARG(&plane_state->uapi.src), + DRM_RECT_ARG(&plane_state->uapi.dst)); } static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, struct intel_atomic_state *state, const char *context) { - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_plane_state *plane_state; struct intel_plane *plane; @@ -12318,14 +12667,14 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", crtc->base.base.id, crtc->base.name, - yesno(pipe_config->base.enable), context); + yesno(pipe_config->hw.enable), context); - if (!pipe_config->base.enable) + if (!pipe_config->hw.enable) goto dump_planes; snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", - yesno(pipe_config->base.active), + yesno(pipe_config->hw.active), buf, pipe_config->output_types, 
output_formats(pipe_config->output_format)); @@ -12365,10 +12714,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); DRM_DEBUG_KMS("requested mode:\n"); - drm_mode_debug_printmodeline(&pipe_config->base.mode); + drm_mode_debug_printmodeline(&pipe_config->hw.mode); DRM_DEBUG_KMS("adjusted mode:\n"); - drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); - intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); + drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); + intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode); DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", pipe_config->port_clock, pipe_config->pipe_src_w, pipe_config->pipe_src_h, @@ -12426,6 +12775,12 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) bool ret = true; /* + * We're going to peek into connector->state, + * hence connection_mutex must be held. + */ + drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); + + /* * Walk the connector list instead of the encoder * list to detect the problem on ddi platforms * where there's just one encoder per digital port. @@ -12482,22 +12837,59 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) return ret; } +static void +intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state) +{ + intel_crtc_copy_color_blobs(crtc_state); +} + +static void +intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) +{ + crtc_state->hw.enable = crtc_state->uapi.enable; + crtc_state->hw.active = crtc_state->uapi.active; + crtc_state->hw.mode = crtc_state->uapi.mode; + crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; + intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state); +} + +static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) +{ + crtc_state->uapi.enable = crtc_state->hw.enable; + crtc_state->uapi.active = crtc_state->hw.active; + WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); + + crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; + + /* copy color blobs to uapi */ + drm_property_replace_blob(&crtc_state->uapi.degamma_lut, + crtc_state->hw.degamma_lut); + drm_property_replace_blob(&crtc_state->uapi.gamma_lut, + crtc_state->hw.gamma_lut); + drm_property_replace_blob(&crtc_state->uapi.ctm, + crtc_state->hw.ctm); +} + static int -clear_intel_crtc_state(struct intel_crtc_state *crtc_state) +intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = - to_i915(crtc_state->base.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *saved_state; - saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL); + saved_state = intel_crtc_state_alloc(crtc); if (!saved_state) return -ENOMEM; + /* free the old crtc_state->hw members */ + intel_crtc_free_hw_state(crtc_state); + /* FIXME: before the switch to atomic started, a new pipe_config was * kzalloc'd. Code that depends on any field being zero should be * fixed, so that the crtc_state can be safely duplicated. For now, * only fields that are know to not cause problems are preserved. 
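intel_crtc_copy_hw_to_uapi_state() above moves the degamma/gamma/CTM blobs back to the uapi side with drm_property_replace_blob(), which swaps refcounted blob pointers rather than copying table contents. A toy userspace model of that replace operation (the refcount handling is a simplified reading of the DRM helper's contract, not its actual code):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a refcounted drm_property_blob. */
struct blob {
	int refcount;
};

static void blob_get(struct blob *b)
{
	if (b)
		b->refcount++;
}

static void blob_put(struct blob *b)
{
	if (b && --b->refcount == 0)
		free(b);
}

/* Replace the blob held in *slot: take a reference on the new one,
 * then drop the old reference, leaving caller ownership untouched. */
static void replace_blob(struct blob **slot, struct blob *new_blob)
{
	struct blob *old = *slot;

	if (old == new_blob)
		return;

	blob_get(new_blob);
	*slot = new_blob;
	blob_put(old);
}

int main(void)
{
	struct blob *gamma = calloc(1, sizeof(*gamma));
	struct blob *uapi_slot = NULL;

	gamma->refcount = 1;			/* creator's reference */
	replace_blob(&uapi_slot, gamma);	/* uapi now shares it */
	printf("refcount=%d\n", gamma->refcount);

	replace_blob(&uapi_slot, NULL);		/* uapi lets go */
	blob_put(gamma);			/* creator lets go: freed */
	return 0;
}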
*/ + saved_state->uapi = crtc_state->uapi; saved_state->scaler_state = crtc_state->scaler_state; saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->dpll_hw_state = crtc_state->dpll_hw_state; @@ -12515,20 +12907,19 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) saved_state->sync_mode_slaves_mask = crtc_state->sync_mode_slaves_mask; - /* Keep base drm_crtc_state intact, only clear our extended struct */ - BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); - memcpy(&crtc_state->base + 1, &saved_state->base + 1, - sizeof(*crtc_state) - sizeof(crtc_state->base)); - + memcpy(crtc_state, saved_state, sizeof(*crtc_state)); kfree(saved_state); + + intel_crtc_copy_uapi_to_hw_state(crtc_state); + return 0; } static int intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) { - struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_atomic_state *state = pipe_config->base.state; + struct drm_crtc *crtc = pipe_config->uapi.crtc; + struct drm_atomic_state *state = pipe_config->uapi.state; struct intel_encoder *encoder; struct drm_connector *connector; struct drm_connector_state *connector_state; @@ -12536,10 +12927,6 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) int i; bool retry = true; - ret = clear_intel_crtc_state(pipe_config); - if (ret) - return ret; - pipe_config->cpu_transcoder = (enum transcoder) to_intel_crtc(crtc)->pipe; @@ -12548,13 +12935,13 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) * positive or negative polarity is requested, treat this as meaning * negative polarity. */ - if (!(pipe_config->base.adjusted_mode.flags & + if (!(pipe_config->hw.adjusted_mode.flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) - pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; + pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; - if (!(pipe_config->base.adjusted_mode.flags & + if (!(pipe_config->hw.adjusted_mode.flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) - pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; + pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), pipe_config); @@ -12571,7 +12958,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) * computation to clearly distinguish it from the adjusted mode, which * can be changed by the connectors in the below retry loop. */ - drm_mode_get_hv_timing(&pipe_config->base.mode, + drm_mode_get_hv_timing(&pipe_config->hw.mode, &pipe_config->pipe_src_w, &pipe_config->pipe_src_h); @@ -12604,7 +12991,7 @@ encoder_retry: pipe_config->pixel_multiplier = 1; /* Fill in default crtc timings, allow encoders to overwrite them. */ - drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, + drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, CRTC_STEREO_DOUBLE); /* Set the crtc_state defaults for trans_port_sync */ @@ -12638,7 +13025,7 @@ encoder_retry: /* Set default port clock if not overwritten by the encoder. Needs to be * done afterwards in case the encoder adjusts the mode. 
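intel_crtc_prepare_cleared_state() above implements a reset-with-whitelist: allocate a freshly initialized state, copy over only the fields known to survive a modeset (the uapi block, scaler state, shared DPLL, inherited workaround fields), then memcpy() the whole thing back. A standalone sketch of that pattern (fields chosen for illustration):

#include <stdio.h>
#include <string.h>

struct pipe_state {
	int shared_dpll;	/* whitelisted: must survive */
	int scaler_id;		/* whitelisted: must survive */
	int pixel_rate;		/* recomputed later, gets cleared */
};

/* Defaults a plain memset() cannot express (-1 sentinel rather
 * than zero), as with intel_crtc_state_reset() in this diff. */
static void state_reset(struct pipe_state *s)
{
	memset(s, 0, sizeof(*s));
	s->scaler_id = -1;
}

static void prepare_cleared_state(struct pipe_state *s)
{
	struct pipe_state saved;

	state_reset(&saved);
	saved.shared_dpll = s->shared_dpll;
	saved.scaler_id = s->scaler_id;
	memcpy(s, &saved, sizeof(*s));
}

int main(void)
{
	struct pipe_state s = {
		.shared_dpll = 3, .scaler_id = 1, .pixel_rate = 148500,
	};

	prepare_cleared_state(&s);
	printf("dpll=%d scaler=%d pixel_rate=%d\n",
	       s.shared_dpll, s.scaler_id, s.pixel_rate);
	return 0;
}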
*/ if (!pipe_config->port_clock) - pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock + pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock * pipe_config->pixel_multiplier; ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); @@ -12667,6 +13054,12 @@ encoder_retry: DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", base_bpp, pipe_config->pipe_bpp, pipe_config->dither); + /* + * Make drm_calc_timestamping_constants in + * drm_atomic_helper_update_legacy_modeset_state() happy + */ + pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode; + return 0; } @@ -12805,13 +13198,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, const struct intel_crtc_state *pipe_config, bool fastset) { - struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); bool ret = true; u32 bp_gamma = 0; bool fixup_inherited = fastset && - (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && - !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); + (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) && + !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED); if (fixup_inherited && !fastboot_enabled(dev_priv)) { DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); @@ -13000,19 +13393,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(output_types); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_I(output_format); @@ -13029,17 +13422,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); - PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_INTERLACE); if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { - PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_PHSYNC); - 
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_NHSYNC); - PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_PVSYNC); - PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, DRM_MODE_FLAG_NVSYNC); } @@ -13078,7 +13471,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); if (bp_gamma) - PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma); + PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); } @@ -13123,7 +13516,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) PIPE_CONF_CHECK_I(pipe_bpp); - PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); PIPE_CONF_CHECK_I(min_voltage_level); @@ -13138,6 +13531,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(sync_mode_slaves_mask); PIPE_CONF_CHECK_I(master_transcoder); + PIPE_CONF_CHECK_I(dsc.compression_enable); + PIPE_CONF_CHECK_I(dsc.dsc_split); + PIPE_CONF_CHECK_I(dsc.compressed_bpp); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL @@ -13157,7 +13554,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, if (pipe_config->has_pch_encoder) { int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), &pipe_config->fdi_m_n); - int dotclock = pipe_config->base.adjusted_mode.crtc_clock; + int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; /* * FDI already provided one idea for the dotclock. 
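The long PIPE_CONF_CHECK_I/PIPE_CONF_CHECK_FLAGS runs above (retargeted here from base.adjusted_mode to hw.adjusted_mode) are macros that compare one field of the current and the freshly computed state and name the field on mismatch. A cut-down model of how such a checker macro works:

#include <stdbool.h>
#include <stdio.h>

struct pipe_conf {
	int pipe_bpp;
	unsigned int mode_flags;
};

/* One line per field: stringifying the member name lets the
 * mismatch report say exactly which field diverged. */
#define CONF_CHECK_I(field) do {					\
	if (cur->field != new->field) {					\
		fprintf(stderr, "mismatch in " #field			\
			" (expected %d, found %d)\n",			\
			cur->field, new->field);			\
		ok = false;						\
	}								\
} while (0)

/* Flag variant: only the masked bits take part in the compare. */
#define CONF_CHECK_FLAGS(field, mask) do {				\
	if ((cur->field ^ new->field) & (mask)) {			\
		fprintf(stderr, "mismatch in " #field " & " #mask "\n");\
		ok = false;						\
	}								\
} while (0)

static bool pipe_conf_compare(const struct pipe_conf *cur,
			      const struct pipe_conf *new)
{
	bool ok = true;

	CONF_CHECK_I(pipe_bpp);
	CONF_CHECK_FLAGS(mode_flags, 0x10 /* e.g. an interlace bit */);
	return ok;
}

int main(void)
{
	struct pipe_conf cur = { .pipe_bpp = 24, .mode_flags = 0x10 };
	struct pipe_conf new = { .pipe_bpp = 30, .mode_flags = 0x00 };

	return pipe_conf_compare(&cur, &new) ? 0 : 1;
}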
@@ -13185,7 +13582,7 @@ static void verify_wm_state(struct intel_crtc *crtc, const enum pipe pipe = crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active) + if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) return; hw = kzalloc(sizeof(*hw), GFP_KERNEL); @@ -13390,16 +13787,14 @@ verify_crtc_state(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; - struct intel_crtc_state *pipe_config; - struct drm_atomic_state *state; + struct intel_crtc_state *pipe_config = old_crtc_state; + struct drm_atomic_state *state = old_crtc_state->uapi.state; bool active; - state = old_crtc_state->base.state; - __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); - pipe_config = old_crtc_state; - memset(pipe_config, 0, sizeof(*pipe_config)); - pipe_config->base.crtc = &crtc->base; - pipe_config->base.state = state; + __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); + intel_crtc_free_hw_state(old_crtc_state); + intel_crtc_state_reset(old_crtc_state, crtc); + old_crtc_state->uapi.state = state; DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); @@ -13407,23 +13802,26 @@ verify_crtc_state(struct intel_crtc *crtc, /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) - active = new_crtc_state->base.active; + active = new_crtc_state->hw.active; - I915_STATE_WARN(new_crtc_state->base.active != active, - "crtc active state doesn't match with hw state " - "(expected %i, found %i)\n", new_crtc_state->base.active, active); + I915_STATE_WARN(new_crtc_state->hw.active != active, + "crtc active state doesn't match with hw state " + "(expected %i, found %i)\n", + new_crtc_state->hw.active, active); - I915_STATE_WARN(crtc->active != new_crtc_state->base.active, - "transitional active state does not match atomic hw state " - "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); + I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, + "transitional active state does not match atomic hw state " + "(expected %i, found %i)\n", + new_crtc_state->hw.active, crtc->active); for_each_encoder_on_crtc(dev, &crtc->base, encoder) { enum pipe pipe; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(active != new_crtc_state->base.active, - "[ENCODER:%i] active %i with crtc active %i\n", - encoder->base.base.id, active, new_crtc_state->base.active); + I915_STATE_WARN(active != new_crtc_state->hw.active, + "[ENCODER:%i] active %i with crtc active %i\n", + encoder->base.base.id, active, + new_crtc_state->hw.active); I915_STATE_WARN(active && crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", @@ -13435,7 +13833,7 @@ verify_crtc_state(struct intel_crtc *crtc, intel_crtc_compute_pixel_rate(pipe_config); - if (!new_crtc_state->base.active) + if (!new_crtc_state->hw.active) return; intel_pipe_config_sanity_check(dev_priv, pipe_config); @@ -13458,7 +13856,7 @@ intel_verify_planes(struct intel_atomic_state *state) for_each_new_intel_plane_in_state(state, plane, plane_state, i) assert_plane(plane, plane_state->planar_slave || - plane_state->base.visible); + plane_state->uapi.visible); } static void @@ -13497,7 +13895,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, crtc_mask = drm_crtc_mask(&crtc->base); - if (new_crtc_state->base.active) + if (new_crtc_state->hw.active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c 
in active mask 0x%02x)\n", pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); @@ -13576,10 +13974,10 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, static void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; drm_calc_timestamping_constants(&crtc->base, adjusted_mode); @@ -13661,7 +14059,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) /* look at all crtc's that are going to be enabled in during modeset */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - if (!crtc_state->base.active || + if (!crtc_state->hw.active || !needs_modeset(crtc_state)) continue; @@ -13686,7 +14084,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) crtc_state->hsw_workaround_pipe = INVALID_PIPE; - if (!crtc_state->base.active || + if (!crtc_state->hw.active || needs_modeset(crtc_state)) continue; @@ -13712,11 +14110,6 @@ static int intel_modeset_checks(struct intel_atomic_state *state) struct intel_crtc *crtc; int ret, i; - if (!check_digital_port_conflicts(state)) { - DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); - return -EINVAL; - } - /* keep the current setting */ if (!state->cdclk.force_min_cdclk_changed) state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; @@ -13725,19 +14118,24 @@ static int intel_modeset_checks(struct intel_atomic_state *state) state->active_pipes = dev_priv->active_pipes; state->cdclk.logical = dev_priv->cdclk.logical; state->cdclk.actual = dev_priv->cdclk.actual; - state->cdclk.pipe = INVALID_PIPE; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (new_crtc_state->base.active) + if (new_crtc_state->hw.active) state->active_pipes |= BIT(crtc->pipe); else state->active_pipes &= ~BIT(crtc->pipe); - if (old_crtc_state->base.active != new_crtc_state->base.active) + if (old_crtc_state->hw.active != new_crtc_state->hw.active) state->active_pipe_changes |= BIT(crtc->pipe); } + if (state->active_pipe_changes) { + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; + } + ret = intel_modeset_calc_cdclk(state); if (ret) return ret; @@ -13773,7 +14171,7 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) return; - new_crtc_state->base.mode_changed = false; + new_crtc_state->uapi.mode_changed = false; new_crtc_state->update_pipe = true; /* @@ -13790,12 +14188,49 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta new_crtc_state->has_drrs = old_crtc_state->has_drrs; } -static int intel_atomic_check_planes(struct intel_atomic_state *state) +static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, + struct intel_crtc *crtc, + u8 plane_ids_mask) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state; + + if ((plane_ids_mask & BIT(plane->id)) == 0) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + 
} + + return 0; +} + +static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) { + /* See {hsw,vlv,ivb}_plane_ratio() */ + return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || + IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_IVYBRIDGE(dev_priv); +} + +static int intel_atomic_check_planes(struct intel_atomic_state *state, + bool *need_modeset) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_plane_state *plane_state; struct intel_plane *plane; + struct intel_crtc *crtc; int i, ret; + ret = icl_add_linked_planes(state); + if (ret) + return ret; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { ret = intel_plane_atomic_check(state, plane); if (ret) { @@ -13805,6 +14240,41 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state) } } + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + u8 old_active_planes, new_active_planes; + + ret = icl_check_nv12_planes(new_crtc_state); + if (ret) + return ret; + + /* + * On some platforms the number of active planes affects + * the planes' minimum cdclk calculation. Add such planes + * to the state before we compute the minimum cdclk. + */ + if (!active_planes_affects_min_cdclk(dev_priv)) + continue; + + old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); + new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); + + if (hweight8(old_active_planes) == hweight8(new_active_planes)) + continue; + + ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); + if (ret) + return ret; + } + + /* + * active_planes bitmask has been updated, and potentially + * affected planes are part of the state. We can now + * compute the minimum cdclk for each plane. 
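For readers following the new min_cdclk plumbing: the coupling that active_planes_affects_min_cdclk() gates on is "more active planes raise a plane's cdclk floor" on the listed platforms. A minimal sketch of the idea, with invented ratio values (the real per-platform tables are in the {hsw,vlv,ivb}_plane_ratio() helpers referenced above, which this diff does not show):

    /* Illustrative only: the ratio values below are made up. */
    static int example_plane_min_cdclk(const struct intel_crtc_state *crtc_state)
    {
            /* cursor excluded, matching intel_atomic_check_planes() above */
            int num_active_planes = hweight8(crtc_state->active_planes &
                                             ~BIT(PLANE_CURSOR));
            unsigned int num = 1, den = 1;

            if (num_active_planes >= 2) {
                    num = 10;
                    den = 8;
            }

            return DIV_ROUND_UP(crtc_state->pixel_rate * num, den);
    }

This is why intel_atomic_check_planes() pulls additional planes into the state when the active plane count changes: their min_cdclk values have to be recomputed even though userspace did not touch them.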
+ */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) + *need_modeset |= intel_plane_calc_min_cdclk(state, plane); + return 0; } @@ -13839,14 +14309,14 @@ static int intel_atomic_check(struct drm_device *dev, struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_crtc *crtc; int ret, i; - bool any_ms = state->cdclk.force_min_cdclk_changed; + bool any_ms = false; /* Catch I915_MODE_FLAG_INHERITED */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (new_crtc_state->base.mode.private_flags != - old_crtc_state->base.mode.private_flags) - new_crtc_state->base.mode_changed = true; + if (new_crtc_state->hw.mode.private_flags != + old_crtc_state->hw.mode.private_flags) + new_crtc_state->uapi.mode_changed = true; } ret = drm_atomic_helper_check_modeset(dev, &state->base); @@ -13855,14 +14325,24 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state)) + if (!needs_modeset(new_crtc_state)) { + /* Light copy */ + intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state); + continue; + } + + if (!new_crtc_state->uapi.enable) { + intel_crtc_copy_uapi_to_hw_state(new_crtc_state); - if (!new_crtc_state->base.enable) { any_ms = true; continue; } + ret = intel_crtc_prepare_cleared_state(new_crtc_state); + if (ret) + goto fail; + ret = intel_modeset_pipe_config(new_crtc_state); if (ret) goto fail; @@ -13873,10 +14353,22 @@ static int intel_atomic_check(struct drm_device *dev, any_ms = true; } + if (any_ms && !check_digital_port_conflicts(state)) { + DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); + ret = -EINVAL; + goto fail; + } + ret = drm_dp_mst_atomic_check(&state->base); if (ret) goto fail; + any_ms |= state->cdclk.force_min_cdclk_changed; + + ret = intel_atomic_check_planes(state, &any_ms); + if (ret) + goto fail; + if (any_ms) { ret = intel_modeset_checks(state); if (ret) @@ -13885,14 +14377,6 @@ static int intel_atomic_check(struct drm_device *dev, state->cdclk.logical = dev_priv->cdclk.logical; } - ret = icl_add_linked_planes(state); - if (ret) - goto fail; - - ret = intel_atomic_check_planes(state); - if (ret) - goto fail; - ret = intel_atomic_check_crtcs(state); if (ret) goto fail; @@ -13956,7 +14440,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!IS_GEN(dev_priv, 2)) + if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) { @@ -13970,12 +14454,9 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - /* drm_atomic_helper_update_legacy_modeset_state might not be called. 
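A note on the uapi/hw split visible throughout these hunks: intel_crtc_state now carries a userspace-facing copy (uapi) and the copy that is actually checked and programmed (hw), and the check phase derives the latter from the former. The copy helpers themselves are not part of the hunks shown here; a plausible sketch of the full variant (field list assumed for illustration; the _nomodeset variant would copy a smaller subset):

    static void sketch_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
    {
            /* assumed field list, inferred from the call sites above */
            crtc_state->hw.enable = crtc_state->uapi.enable;
            crtc_state->hw.active = crtc_state->uapi.active;
            crtc_state->hw.mode = crtc_state->uapi.mode;
            crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
    }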
*/ - crtc->base.mode = new_crtc_state->base.mode; - /* * Update pipe size and adjust fitter if needed: the reason for this is * that in compute_mode_changes we check the native mode (not the pfit @@ -14007,6 +14488,7 @@ static void commit_pipe_config(struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(state->base.dev); bool modeset = needs_modeset(new_crtc_state); @@ -14015,7 +14497,7 @@ static void commit_pipe_config(struct intel_atomic_state *state, * CRTC was enabled. */ if (!modeset) { - if (new_crtc_state->base.color_mgmt_changed || + if (new_crtc_state->uapi.color_mgmt_changed || new_crtc_state->update_pipe) intel_color_commit(new_crtc_state); @@ -14030,8 +14512,7 @@ static void commit_pipe_config(struct intel_atomic_state *state, } if (dev_priv->display.atomic_update_watermarks) - dev_priv->display.atomic_update_watermarks(state, - new_crtc_state); + dev_priv->display.atomic_update_watermarks(state, crtc); } static void intel_update_crtc(struct intel_crtc *crtc, @@ -14048,15 +14529,20 @@ static void intel_update_crtc(struct intel_crtc *crtc, if (modeset) { intel_crtc_update_active_timings(new_crtc_state); - dev_priv->display.crtc_enable(new_crtc_state, state); + dev_priv->display.crtc_enable(state, crtc); /* vblanks work again, re-enable pipe CRC. */ intel_crtc_enable_pipe_crc(crtc); } else { - intel_pre_plane_update(old_crtc_state, new_crtc_state); + if (new_crtc_state->preload_luts && + (new_crtc_state->uapi.color_mgmt_changed || + new_crtc_state->update_pipe)) + intel_color_load_luts(new_crtc_state); + + intel_pre_plane_update(state, crtc); if (new_crtc_state->update_pipe) - intel_encoders_update_pipe(crtc, new_crtc_state, state); + intel_encoders_update_pipe(state, crtc); } if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) @@ -14083,13 +14569,13 @@ static void intel_update_crtc(struct intel_crtc *crtc, * of enabling them on the CRTC's first fastset. */ if (new_crtc_state->update_pipe && !modeset && - old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) + old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); enum transcoder slave_transcoder; WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); @@ -14114,97 +14600,59 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, */ intel_crtc_disable_pipe_crc(crtc); - dev_priv->display.crtc_disable(old_crtc_state, state); + dev_priv->display.crtc_disable(state, crtc); crtc->active = false; intel_fbc_disable(crtc); intel_disable_shared_dpll(old_crtc_state); - /* - * Underruns don't always raise interrupts, - * so check manually. 
- */ - intel_check_cpu_fifo_underruns(dev_priv); - intel_check_pch_fifo_underruns(dev_priv); - /* FIXME unify this for all platforms */ - if (!new_crtc_state->base.active && + if (!new_crtc_state->hw.active && !HAS_GMCH(dev_priv) && dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, - new_crtc_state); -} - -static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) -{ - struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); - struct intel_crtc_state *new_slave_crtc_state = - intel_atomic_get_new_crtc_state(state, slave_crtc); - struct intel_crtc_state *old_slave_crtc_state = - intel_atomic_get_old_crtc_state(state, slave_crtc); - - WARN_ON(!slave_crtc || !new_slave_crtc_state || - !old_slave_crtc_state); - - /* Disable Slave first */ - intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state); - if (old_slave_crtc_state->base.active) - intel_old_crtc_state_disables(state, - old_slave_crtc_state, - new_slave_crtc_state, - slave_crtc); - - /* Disable Master */ - intel_pre_plane_update(old_crtc_state, new_crtc_state); - if (old_crtc_state->base.active) - intel_old_crtc_state_disables(state, - old_crtc_state, - new_crtc_state, - crtc); + dev_priv->display.initial_watermarks(state, crtc); } static void intel_commit_modeset_disables(struct intel_atomic_state *state) { struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; + u32 handled = 0; int i; - /* - * Disable CRTC/pipes in reverse order because some features(MST in - * TGL+) requires master and slave relationship between pipes, so it - * should always pick the lowest pipe as master as it will be enabled - * first and disable in the reverse order so the master will be the - * last one to be disabled. - */ - for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, - new_crtc_state, i) { + /* Only disable port sync slaves */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) continue; + if (!old_crtc_state->hw.active) + continue; + /* In case of Transcoder port Sync master slave CRTCs can be * assigned in any order and we need to make sure that * slave CRTCs are disabled first and then master CRTC since * Slave vblanks are masked till Master Vblanks. 
*/ - if (is_trans_port_sync_mode(new_crtc_state)) { - if (is_trans_port_sync_master(new_crtc_state)) - intel_trans_port_sync_modeset_disables(state, - crtc, - old_crtc_state, - new_crtc_state); - else - continue; - } else { - intel_pre_plane_update(old_crtc_state, new_crtc_state); + if (!is_trans_port_sync_slave(old_crtc_state)) + continue; - if (old_crtc_state->base.active) - intel_old_crtc_state_disables(state, - old_crtc_state, - new_crtc_state, - crtc); - } + intel_pre_plane_update(state, crtc); + intel_old_crtc_state_disables(state, old_crtc_state, + new_crtc_state, crtc); + handled |= BIT(crtc->pipe); + } + + /* Disable everything else left on */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!needs_modeset(new_crtc_state) || + (handled & BIT(crtc->pipe))) + continue; + + intel_pre_plane_update(state, crtc); + if (old_crtc_state->hw.active) + intel_old_crtc_state_disables(state, old_crtc_state, + new_crtc_state, crtc); + } } @@ -14215,7 +14663,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state) int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!new_crtc_state->base.active) + if (!new_crtc_state->hw.active) continue; intel_update_crtc(crtc, state, old_crtc_state, @@ -14230,15 +14678,15 @@ static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(state->base.dev); intel_crtc_update_active_timings(new_crtc_state); - dev_priv->display.crtc_enable(new_crtc_state, state); + dev_priv->display.crtc_enable(state, crtc); intel_crtc_enable_pipe_crc(crtc); } static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, struct intel_atomic_state *state) { + struct drm_connector *uninitialized_var(conn); struct drm_connector_state *conn_state; - struct drm_connector *conn; struct intel_dp *intel_dp; int i; @@ -14280,7 +14728,7 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, * of enabling them on the CRTC's first fastset. */ if (new_crtc_state->update_pipe && !modeset && - old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) + old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } @@ -14335,17 +14783,19 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; - unsigned int updated = 0; - bool progress; - int i; u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; u8 required_slices = state->wm_results.ddb.enabled_slices; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + u8 dirty_pipes = 0; + int i; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { /* ignore allocations for CRTCs that have been turned off. */ - if (new_crtc_state->base.active) + if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active) entries[i] = old_crtc_state->wm.skl.ddb; + if (new_crtc_state->hw.active) + dirty_pipes |= BIT(crtc->pipe); + } /* If a 2nd DBuf slice is required, enable it here */ if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) @@ -14357,15 +14807,13 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) * never overlap with each other in between CRTC updates. 
Otherwise we'll * cause pipe underruns and other bad stuff. */ - do { - progress = false; - - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + while (dirty_pipes) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { enum pipe pipe = crtc->pipe; - bool vbl_wait = false; bool modeset = needs_modeset(new_crtc_state); - if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active) + if ((dirty_pipes & BIT(pipe)) == 0) continue; if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, @@ -14373,20 +14821,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) INTEL_NUM_PIPES(dev_priv), i)) continue; - updated |= BIT(pipe); entries[i] = new_crtc_state->wm.skl.ddb; - - /* - * If this is an already active pipe, it's DDB changed, - * and this isn't the last pipe that needs updating - * then we need to wait for a vblank to pass for the - * new ddb allocation to take effect. - */ - if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, - &old_crtc_state->wm.skl.ddb) && - !modeset && - state->wm_results.dirty_pipes != updated) - vbl_wait = true; + dirty_pipes &= ~BIT(pipe); if (modeset && is_trans_port_sync_mode(new_crtc_state)) { if (is_trans_port_sync_master(new_crtc_state)) @@ -14401,12 +14837,18 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) new_crtc_state); } - if (vbl_wait) + /* + * If this is an already active pipe, its DDB changed, + * and this isn't the last pipe that needs updating + * then we need to wait for a vblank to pass for the + * new ddb allocation to take effect. + */ + if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, + &old_crtc_state->wm.skl.ddb) && + !modeset && dirty_pipes) intel_wait_for_vblank(dev_priv, pipe); - - progress = true; } - } while (progress); + } /* If the 2nd DBuf slice is no longer required, disable it */ if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) @@ -14527,12 +14969,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) bool modeset = needs_modeset(new_crtc_state); /* Complete events for now-disabled pipes here. */ - if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { + if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { spin_lock_irq(&dev->event_lock); - drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); + drm_crtc_send_vblank_event(&crtc->base, + new_crtc_state->uapi.event); spin_unlock_irq(&dev->event_lock); - new_crtc_state->base.event = NULL; + new_crtc_state->uapi.event = NULL; } } @@ -14563,9 +15006,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) drm_atomic_helper_wait_for_flip_done(dev, &state->base); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (new_crtc_state->base.active && + if (new_crtc_state->hw.active && !needs_modeset(new_crtc_state) && - (new_crtc_state->base.color_mgmt_changed || + !new_crtc_state->preload_luts && + (new_crtc_state->uapi.color_mgmt_changed || new_crtc_state->update_pipe)) intel_color_load_luts(new_crtc_state); } @@ -14577,14 +15021,25 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * * TODO: Move this (and other cleanup) to an async worker eventually. */ - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + /* + * Gen2 reports pipe underruns whenever all planes are disabled. 
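The rewritten skl_commit_modeset_enables() loop above keeps the original invariant while dropping the progress flag: a pipe is only committed once its new DDB span no longer overlaps another pipe's still-current span, and it waits for a vblank when more dirty pipes remain. A self-contained sketch of that ordering idea, with simplified range types rather than i915's skl_ddb_entry:

    #include <stdbool.h>

    #define BIT(x) (1u << (x))

    struct ddb_range { int start, end; };

    static bool ranges_overlap(struct ddb_range a, struct ddb_range b)
    {
            return a.start < b.end && b.start < a.end;
    }

    /*
     * Commit new_alloc[] over cur[] without two pipes ever overlapping.
     * Like the kernel loop, this relies on the old and new allocations
     * each being non-overlapping, so every pass retires at least one pipe.
     */
    static void commit_in_safe_order(struct ddb_range *cur,
                                     const struct ddb_range *new_alloc,
                                     int num_pipes)
    {
            unsigned int dirty = BIT(num_pipes) - 1;

            while (dirty) {
                    for (int i = 0; i < num_pipes; i++) {
                            bool blocked = false;

                            if (!(dirty & BIT(i)))
                                    continue;

                            for (int j = 0; j < num_pipes; j++)
                                    if (i != j && ranges_overlap(new_alloc[i], cur[j]))
                                            blocked = true;

                            if (blocked)
                                    continue;

                            cur[i] = new_alloc[i]; /* "update this pipe" */
                            dirty &= ~BIT(i);
                    }
            }
    }

The while (dirty_pipes) form in the patch is the same algorithm with the bookkeeping inverted: instead of accumulating an "updated" mask plus a progress flag, pipes are removed from dirty_pipes as they become safe to commit.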
+ * So re-enable underrun reporting after some planes get enabled. + * + * We do this before .optimize_watermarks() so that we have a + * chance of catching underruns with the intermediate watermarks + * vs. the new plane configuration. + */ + if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state)) + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); + if (dev_priv->display.optimize_watermarks) - dev_priv->display.optimize_watermarks(state, - new_crtc_state); + dev_priv->display.optimize_watermarks(state, crtc); } for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - intel_post_plane_update(old_crtc_state); + intel_post_plane_update(state, crtc); if (put_domains[i]) modeset_put_power_domains(dev_priv, put_domains[i]); @@ -14592,6 +15047,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); } + /* Underruns don't always raise interrupts, so check manually */ + intel_check_cpu_fifo_underruns(dev_priv); + intel_check_pch_fifo_underruns(dev_priv); + if (state->modeset) intel_verify_planes(state); @@ -14665,11 +15124,19 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) - intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), - to_intel_frontbuffer(new_plane_state->base.fb), + intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), + to_intel_frontbuffer(new_plane_state->hw.fb), plane->frontbuffer_bit); } +static void assert_global_state_locked(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) + drm_modeset_lock_assert_held(&crtc->base.mutex); +} + static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) @@ -14735,7 +15202,9 @@ static int intel_atomic_commit(struct drm_device *dev, intel_shared_dpll_swap_state(state); intel_atomic_track_fbs(state); - if (state->modeset) { + if (state->global_state_changed) { + assert_global_state_locked(dev_priv); + memcpy(dev_priv->min_cdclk, state->min_cdclk, sizeof(state->min_cdclk)); memcpy(dev_priv->min_voltage_level, state->min_voltage_level, @@ -14782,7 +15251,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait, * vblank without our intervention, so leave RPS alone. 
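planes_enabling(), used in the Gen2 underrun hunk above, is not defined in the hunks shown; from its call site it plausibly means "the CRTC ends up with active planes where it previously had none (or this is a full modeset)", along these lines:

    /* assumed definition, inferred from the call site above */
    static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
                                const struct intel_crtc_state *new_crtc_state)
    {
            return (!old_crtc_state->active_planes ||
                    needs_modeset(new_crtc_state)) &&
                   new_crtc_state->active_planes;
    }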
*/ if (!i915_request_started(rq)) - gen6_rps_boost(rq); + intel_rps_boost(rq); i915_request_put(rq); drm_crtc_vblank_put(wait->crtc); @@ -14823,9 +15292,9 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc, static int intel_plane_pin_fb(struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_framebuffer *fb = plane_state->hw.fb; struct i915_vma *vma; if (plane->id == PLANE_CURSOR && @@ -14863,7 +15332,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) { struct i915_sched_attr attr = { - .priority = I915_PRIORITY_DISPLAY, + .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), }; i915_gem_object_wait_priority(obj, 0, &attr); @@ -14888,9 +15357,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, struct intel_plane_state *new_plane_state = to_intel_plane_state(_new_plane_state); struct intel_atomic_state *intel_state = - to_intel_atomic_state(new_plane_state->base.state); + to_intel_atomic_state(new_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_framebuffer *fb = new_plane_state->base.fb; + struct drm_framebuffer *fb = new_plane_state->hw.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); int ret; @@ -14921,9 +15390,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, } } - if (new_plane_state->base.fence) { /* explicit fencing */ + if (new_plane_state->uapi.fence) { /* explicit fencing */ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, - new_plane_state->base.fence, + new_plane_state->uapi.fence, I915_FENCE_TIMEOUT, GFP_KERNEL); if (ret < 0) @@ -14944,9 +15413,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, return ret; fb_obj_bump_render_priority(obj); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); - if (!new_plane_state->base.fence) { /* implicit fencing */ + if (!new_plane_state->uapi.fence) { /* implicit fencing */ struct dma_fence *fence; ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, @@ -14958,13 +15427,13 @@ intel_prepare_plane_fb(struct drm_plane *plane, fence = dma_resv_get_excl_rcu(obj->base.resv); if (fence) { - add_rps_boost_after_vblank(new_plane_state->base.crtc, + add_rps_boost_after_vblank(new_plane_state->hw.crtc, fence); dma_fence_put(fence); } } else { - add_rps_boost_after_vblank(new_plane_state->base.crtc, - new_plane_state->base.fence); + add_rps_boost_after_vblank(new_plane_state->hw.crtc, + new_plane_state->uapi.fence); } /* @@ -14976,7 +15445,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, * maximum clocks following a vblank miss (see do_rps_boost()). 
*/ if (!intel_state->rps_interactive) { - intel_rps_mark_interactive(dev_priv, true); + intel_rps_mark_interactive(&dev_priv->gt.rps, true); intel_state->rps_interactive = true; } @@ -14997,11 +15466,11 @@ intel_cleanup_plane_fb(struct drm_plane *plane, struct intel_plane_state *old_plane_state = to_intel_plane_state(_old_plane_state); struct intel_atomic_state *intel_state = - to_intel_atomic_state(old_plane_state->base.state); + to_intel_atomic_state(old_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); if (intel_state->rps_interactive) { - intel_rps_mark_interactive(dev_priv, false); + intel_rps_mark_interactive(&dev_priv->gt.rps, false); intel_state->rps_interactive = false; } @@ -15009,44 +15478,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, intel_plane_unpin_fb(old_plane_state); } -int -skl_max_scale(const struct intel_crtc_state *crtc_state, - const struct drm_format_info *format) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int max_scale; - int crtc_clock, max_dotclk, tmpclk1, tmpclk2; - - if (!crtc_state->base.enable) - return DRM_PLANE_HELPER_NO_SCALING; - - crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; - max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; - - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) - max_dotclk *= 2; - - if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock)) - return DRM_PLANE_HELPER_NO_SCALING; - - /* - * skl max scale is lower of: - * close to 3 but not 3, -1 is for that purpose - * or - * cdclk/crtc_clock - */ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || - !drm_format_info_is_yuv_semiplanar(format)) - tmpclk1 = 0x30000 - 1; - else - tmpclk1 = 0x20000 - 1; - tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); - max_scale = min(tmpclk1, tmpclk2); - - return max_scale; -} - /** * intel_plane_destroy - destroy a plane * @plane: plane to destroy @@ -15099,8 +15530,13 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XBGR16161616F: return modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED; default: @@ -15157,7 +15593,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane, * When crtc is inactive or there is a modeset pending, * wait for it to complete in the slowpath */ - if (!crtc_state->base.active || needs_modeset(crtc_state) || + if (!crtc_state->hw.active || needs_modeset(crtc_state) || crtc_state->update_pipe) goto slow; @@ -15166,8 +15602,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane, * the plane. This prevents our async update's changes from getting * overridden by a previous synchronous update's state. */ - if (old_plane_state->base.commit && - !try_wait_for_completion(&old_plane_state->base.commit->hw_done)) + if (old_plane_state->uapi.commit && + !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) goto slow; /* @@ -15175,12 +15611,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane, * take the slowpath. Only changing fb or position should be * in the fastpath. 
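For reference, the skl_max_scale() helper deleted above worked in 16.16 fixed point: 0x30000 - 1 is "just under 3.0" (the hardware downscale limit), 0x20000 - 1 is "just under 2.0" for YUV semi-planar formats on older gens, and the second term builds cdclk/crtc_clock in the same format. A worked example with made-up clocks:

    max_dotclk = 648000, crtc_clock = 432000
    (1 << 8) * ((648000 << 8) / 432000) = 256 * 384 = 98304 = 0x18000  /* 1.5 in 16.16 */

so here downscaling would have been capped at 1.5x by the cdclk ratio rather than the ~3x hardware limit. The clock-derived restriction itself presumably moves into the scaler setup code after this removal.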
*/ - if (old_plane_state->base.crtc != &crtc->base || - old_plane_state->base.src_w != src_w || - old_plane_state->base.src_h != src_h || - old_plane_state->base.crtc_w != crtc_w || - old_plane_state->base.crtc_h != crtc_h || - !old_plane_state->base.fb != !fb) + if (old_plane_state->uapi.crtc != &crtc->base || + old_plane_state->uapi.src_w != src_w || + old_plane_state->uapi.src_h != src_h || + old_plane_state->uapi.crtc_w != crtc_w || + old_plane_state->uapi.crtc_h != crtc_h || + !old_plane_state->uapi.fb != !fb) goto slow; new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); @@ -15193,16 +15629,16 @@ intel_legacy_cursor_update(struct drm_plane *_plane, goto out_free; } - drm_atomic_set_fb_for_plane(&new_plane_state->base, fb); + drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); - new_plane_state->base.src_x = src_x; - new_plane_state->base.src_y = src_y; - new_plane_state->base.src_w = src_w; - new_plane_state->base.src_h = src_h; - new_plane_state->base.crtc_x = crtc_x; - new_plane_state->base.crtc_y = crtc_y; - new_plane_state->base.crtc_w = crtc_w; - new_plane_state->base.crtc_h = crtc_h; + new_plane_state->uapi.src_x = src_x; + new_plane_state->uapi.src_y = src_y; + new_plane_state->uapi.src_w = src_w; + new_plane_state->uapi.src_h = src_h; + new_plane_state->uapi.crtc_x = crtc_x; + new_plane_state->uapi.crtc_y = crtc_y; + new_plane_state->uapi.crtc_w = crtc_w; + new_plane_state->uapi.crtc_h = crtc_h; ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, old_plane_state, new_plane_state); @@ -15213,13 +15649,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane, if (ret) goto out_free; - intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP); - intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), - to_intel_frontbuffer(new_plane_state->base.fb), + intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), + ORIGIN_FLIP); + intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), + to_intel_frontbuffer(new_plane_state->hw.fb), plane->frontbuffer_bit); /* Swap plane state */ - plane->base.state = &new_plane_state->base; + plane->base.state = &new_plane_state->uapi; /* * We cannot swap crtc_state as it may be in use by an atomic commit or @@ -15233,7 +15670,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane, */ crtc_state->active_planes = new_crtc_state->active_planes; - if (new_plane_state->base.visible) + if (new_plane_state->uapi.visible) intel_update_plane(plane, crtc_state, new_plane_state); else intel_disable_plane(plane, crtc_state); @@ -15242,11 +15679,11 @@ intel_legacy_cursor_update(struct drm_plane *_plane, out_free: if (new_crtc_state) - intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base); + intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); if (ret) - intel_plane_destroy_state(&plane->base, &new_plane_state->base); + intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); else - intel_plane_destroy_state(&plane->base, &old_plane_state->base); + intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); return ret; slow: @@ -15288,7 +15725,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; unsigned int possible_crtcs; - const u64 *modifiers; const u32 *formats; int num_formats; int ret, zpos; @@ -15320,44 +15756,69 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; } - if (INTEL_GEN(dev_priv) >= 4) { - formats = i965_primary_formats; - num_formats = ARRAY_SIZE(i965_primary_formats); - modifiers = i9xx_format_modifiers; - - plane->max_stride = i9xx_plane_max_stride; - plane->update_plane = i9xx_update_plane; - plane->disable_plane = i9xx_disable_plane; - plane->get_hw_state = i9xx_plane_get_hw_state; - plane->check_plane = i9xx_plane_check; - - plane_funcs = &i965_plane_funcs; + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + formats = vlv_primary_formats; + num_formats = ARRAY_SIZE(vlv_primary_formats); + } else if (INTEL_GEN(dev_priv) >= 4) { + /* + * WaFP16GammaEnabling:ivb + * "Workaround : When using the 64-bit format, the plane + * output on each color channel has one quarter amplitude. + * It can be brought up to full amplitude by using pipe + * gamma correction or pipe color space conversion to + * multiply the plane output by four." + * + * There is no dedicated plane gamma for the primary plane, + * and using the pipe gamma/csc could conflict with other + * planes, so we choose not to expose fp16 on IVB primary + * planes. HSW primary planes no longer have this problem. + */ + if (IS_IVYBRIDGE(dev_priv)) { + formats = ivb_primary_formats; + num_formats = ARRAY_SIZE(ivb_primary_formats); + } else { + formats = i965_primary_formats; + num_formats = ARRAY_SIZE(i965_primary_formats); + } } else { formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); - modifiers = i9xx_format_modifiers; - - plane->max_stride = i9xx_plane_max_stride; - plane->update_plane = i9xx_update_plane; - plane->disable_plane = i9xx_disable_plane; - plane->get_hw_state = i9xx_plane_get_hw_state; - plane->check_plane = i9xx_plane_check; + } + if (INTEL_GEN(dev_priv) >= 4) + plane_funcs = &i965_plane_funcs; + else plane_funcs = &i8xx_plane_funcs; - } + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + plane->min_cdclk = vlv_plane_min_cdclk; + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + plane->min_cdclk = hsw_plane_min_cdclk; + else if (IS_IVYBRIDGE(dev_priv)) + plane->min_cdclk = ivb_plane_min_cdclk; + else + plane->min_cdclk = i9xx_plane_min_cdclk; + + plane->max_stride = i9xx_plane_max_stride; + plane->update_plane = i9xx_update_plane; + plane->disable_plane = i9xx_disable_plane; + plane->get_hw_state = i9xx_plane_get_hw_state; + plane->check_plane = i9xx_plane_check; possible_crtcs = BIT(pipe); if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, possible_crtcs, plane_funcs, - formats, num_formats, modifiers, + formats, num_formats, + i9xx_format_modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, possible_crtcs, plane_funcs, - formats, num_formats, modifiers, + formats, num_formats, + i9xx_format_modifiers, DRM_PLANE_TYPE_PRIMARY, "plane %c", plane_name(plane->i9xx_plane)); @@ -15461,28 +15922,6 @@ fail: return ERR_PTR(ret); } -static void intel_crtc_init_scalers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) -{ - struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int i; - - crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe]; - if (!crtc->num_scalers) - return; - - for (i = 0; i < crtc->num_scalers; i++) { - struct intel_scaler *scaler = &scaler_state->scalers[i]; - - scaler->in_use = 0; - 
scaler->mode = 0; - } - - scaler_state->scaler_id = -1; -} - #define INTEL_CRTC_FUNCS \ .gamma_set = drm_atomic_helper_legacy_gamma_set, \ .set_config = drm_atomic_helper_set_config, \ @@ -15550,33 +15989,53 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = { .disable_vblank = i8xx_disable_vblank, }; +static struct intel_crtc *intel_crtc_alloc(void) +{ + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + crtc = kzalloc(sizeof(*crtc), GFP_KERNEL); + if (!crtc) + return ERR_PTR(-ENOMEM); + + crtc_state = intel_crtc_state_alloc(crtc); + if (!crtc_state) { + kfree(crtc); + return ERR_PTR(-ENOMEM); + } + + crtc->base.state = &crtc_state->uapi; + crtc->config = crtc_state; + + return crtc; +} + +static void intel_crtc_free(struct intel_crtc *crtc) +{ + intel_crtc_destroy_state(&crtc->base, crtc->base.state); + kfree(crtc); +} + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct intel_plane *primary, *cursor; const struct drm_crtc_funcs *funcs; - struct intel_crtc *intel_crtc; - struct intel_crtc_state *crtc_state = NULL; - struct intel_plane *primary = NULL; - struct intel_plane *cursor = NULL; + struct intel_crtc *crtc; int sprite, ret; - intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); - if (!intel_crtc) - return -ENOMEM; + crtc = intel_crtc_alloc(); + if (IS_ERR(crtc)) + return PTR_ERR(crtc); - crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); - if (!crtc_state) { - ret = -ENOMEM; - goto fail; - } - __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base); - intel_crtc->config = crtc_state; + crtc->pipe = pipe; + crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe]; primary = intel_primary_plane_create(dev_priv, pipe); if (IS_ERR(primary)) { ret = PTR_ERR(primary); goto fail; } - intel_crtc->plane_ids_mask |= BIT(primary->id); + crtc->plane_ids_mask |= BIT(primary->id); for_each_sprite(dev_priv, pipe, sprite) { struct intel_plane *plane; @@ -15586,7 +16045,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) ret = PTR_ERR(plane); goto fail; } - intel_crtc->plane_ids_mask |= BIT(plane->id); + crtc->plane_ids_mask |= BIT(plane->id); } cursor = intel_cursor_plane_create(dev_priv, pipe); @@ -15594,7 +16053,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) ret = PTR_ERR(cursor); goto fail; } - intel_crtc->plane_ids_mask |= BIT(cursor->id); + crtc->plane_ids_mask |= BIT(cursor->id); if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv) || @@ -15615,42 +16074,32 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) funcs = &ilk_crtc_funcs; } - ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, + ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base, &primary->base, &cursor->base, funcs, "pipe %c", pipe_name(pipe)); if (ret) goto fail; - intel_crtc->pipe = pipe; - - /* initialize shared scalers */ - intel_crtc_init_scalers(intel_crtc, crtc_state); - BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || dev_priv->pipe_to_crtc_mapping[pipe] != NULL); - dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc; + dev_priv->pipe_to_crtc_mapping[pipe] = crtc; if (INTEL_GEN(dev_priv) < 9) { enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); - dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc; + dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; } - 
intel_color_init(intel_crtc); + intel_color_init(crtc); - WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe); return 0; fail: - /* - * drm_mode_config_cleanup() will free up any - * crtcs/planes already initialized. - */ - kfree(crtc_state); - kfree(intel_crtc); + intel_crtc_free(crtc); return ret; } @@ -15693,7 +16142,7 @@ static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) u32 possible_crtcs = 0; for_each_intel_crtc(dev, crtc) { - if (encoder->crtc_mask & BIT(crtc->pipe)) + if (encoder->pipe_mask & BIT(crtc->pipe)) possible_crtcs |= drm_crtc_mask(&crtc->base); } @@ -16151,26 +16600,23 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } stride_alignment = intel_fb_stride_alignment(fb, i); - - /* - * Display WA #0531: skl,bxt,kbl,glk - * - * Render decompression and plane width > 3840 - * combined with horizontal panning requires the - * plane stride to be a multiple of 4. We'll just - * require the entire fb to accommodate that to avoid - * potential runtime errors at plane configuration time. - */ - if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 && - is_ccs_modifier(fb->modifier)) - stride_alignment *= 4; - if (fb->pitches[i] & (stride_alignment - 1)) { DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", i, fb->pitches[i], stride_alignment); goto err; } + if (is_gen12_ccs_plane(fb, i)) { + int ccs_aux_stride = gen12_ccs_aux_stride(fb, i); + + if (fb->pitches[i] != ccs_aux_stride) { + DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n", + i, + fb->pitches[i], ccs_aux_stride); + goto err; + } + } + fb->obj[i] = &obj->base; } @@ -16294,6 +16740,21 @@ intel_mode_valid(struct drm_device *dev, mode->vtotal > vtotal_max) return MODE_V_ILLEGAL; + if (INTEL_GEN(dev_priv) >= 5) { + if (mode->hdisplay < 64 || + mode->htotal - mode->hdisplay < 32) + return MODE_H_ILLEGAL; + + if (mode->vtotal - mode->vdisplay < 5) + return MODE_V_ILLEGAL; + } else { + if (mode->htotal - mode->hdisplay < 32) + return MODE_H_ILLEGAL; + + if (mode->vtotal - mode->vdisplay < 3) + return MODE_V_ILLEGAL; + } + return MODE_OK; } @@ -16427,8 +16888,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - dev_priv->display.fdi_link_train = hsw_fdi_link_train; } if (INTEL_GEN(dev_priv) >= 9) @@ -16518,7 +16977,7 @@ retry: /* Write calculated watermark values back */ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { crtc_state->wm.need_postvbl_update = true; - dev_priv->display.optimize_watermarks(intel_state, crtc_state); + dev_priv->display.optimize_watermarks(intel_state, crtc); to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; } @@ -16550,8 +17009,7 @@ static int intel_initial_commit(struct drm_device *dev) { struct drm_atomic_state *state = NULL; struct drm_modeset_acquire_ctx ctx; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; + struct intel_crtc *crtc; int ret = 0; state = drm_atomic_state_alloc(dev); @@ -16563,15 +17021,17 @@ static int intel_initial_commit(struct drm_device *dev) retry: state->acquire_ctx = &ctx; - drm_for_each_crtc(crtc, dev) { - crtc_state = drm_atomic_get_crtc_state(state, crtc); + for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_crtc_state(state, 
crtc); + if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto out; } - if (crtc_state->active) { - ret = drm_atomic_add_affected_planes(state, crtc); + if (crtc_state->hw.active) { + ret = drm_atomic_add_affected_planes(state, &crtc->base); if (ret) goto out; @@ -16581,7 +17041,7 @@ retry: * having a proper LUT loaded. Remove once we * have readout for pipe gamma enable. */ - crtc_state->color_mgmt_changed = true; + crtc_state->uapi.color_mgmt_changed = true; } } @@ -16904,31 +17364,76 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv, (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); } -static void intel_sanitize_crtc(struct intel_crtc *crtc, - struct drm_modeset_acquire_ctx *ctx) +static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - /* Clear any frame start delays used for debugging left by the BIOS */ - if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); + u32 val; + + if (transcoder_is_dsi(cpu_transcoder)) + return; + + val = I915_READ(reg); + val &= ~HSW_FRAME_START_DELAY_MASK; + val |= HSW_FRAME_START_DELAY(0); + I915_WRITE(reg, val); + } else { i915_reg_t reg = PIPECONF(cpu_transcoder); + u32 val; - I915_WRITE(reg, - I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + val = I915_READ(reg); + val &= ~PIPECONF_FRAME_START_DELAY_MASK; + val |= PIPECONF_FRAME_START_DELAY(0); + I915_WRITE(reg, val); } - if (crtc_state->base.active) { + if (!crtc_state->has_pch_encoder) + return; + + if (HAS_PCH_IBX(dev_priv)) { + i915_reg_t reg = PCH_TRANSCONF(crtc->pipe); + u32 val; + + val = I915_READ(reg); + val &= ~TRANS_FRAME_START_DELAY_MASK; + val |= TRANS_FRAME_START_DELAY(0); + I915_WRITE(reg, val); + } else { + enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc); + i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder); + u32 val; + + val = I915_READ(reg); + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); + I915_WRITE(reg, val); + } +} + +static void intel_sanitize_crtc(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + + if (crtc_state->hw.active) { struct intel_plane *plane; + /* Clear any frame start delays used for debugging left by the BIOS */ + intel_sanitize_frame_start_delay(crtc_state); + /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(dev, crtc, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - if (plane_state->base.visible && + if (plane_state->uapi.visible && plane->base.type != DRM_PLANE_TYPE_PRIMARY) intel_plane_disable_noatomic(crtc, plane); } @@ -16945,10 +17450,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. 
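To make the intel_mode_valid() blanking limits added a little earlier concrete, a hypothetical mode that the new gen5+ checks would reject:

    /* Hypothetical mode, for illustration: horizontal blanking of only
     * 16 pixels, below the new 32-pixel minimum. */
    static const struct drm_display_mode too_tight_blanking = {
            .hdisplay = 1920, .htotal = 1936, /* htotal - hdisplay = 16 < 32 */
            .vdisplay = 1080, .vtotal = 1090, /* vtotal - vdisplay = 10, OK */
    };

intel_mode_valid() now returns MODE_H_ILLEGAL for such a mode instead of letting it through to the hardware.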
*/ - if (crtc_state->base.active && !intel_crtc_has_encoders(crtc)) - intel_crtc_disable_noatomic(&crtc->base, ctx); + if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc)) + intel_crtc_disable_noatomic(crtc, ctx); - if (crtc_state->base.active || HAS_GMCH(dev_priv)) { + if (crtc_state->hw.active || HAS_GMCH(dev_priv)) { /* * We start out with underrun reporting disabled to avoid races. * For correct bookkeeping mark this on active crtcs. @@ -16979,7 +17484,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); /* * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram @@ -16992,7 +17497,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) * road. */ return IS_GEN(dev_priv, 6) && - crtc_state->base.active && + crtc_state->hw.active && crtc_state->shared_dpll && crtc_state->port_clock == 0; } @@ -17009,7 +17514,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * encoder is active and trying to read from a pipe) and the * pipe itself being active. */ bool has_active_crtc = crtc_state && - crtc_state->base.active; + crtc_state->hw.active; if (crtc_state && has_bogus_dpll_config(crtc_state)) { DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", @@ -17113,22 +17618,22 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); - memset(crtc_state, 0, sizeof(*crtc_state)); - __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base); + __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); + intel_crtc_free_hw_state(crtc_state); + intel_crtc_state_reset(crtc_state, crtc); - crtc_state->base.active = crtc_state->base.enable = + crtc_state->hw.active = crtc_state->hw.enable = dev_priv->display.get_pipe_config(crtc, crtc_state); - crtc->base.enabled = crtc_state->base.enable; - crtc->active = crtc_state->base.active; + crtc->base.enabled = crtc_state->hw.enable; + crtc->active = crtc_state->hw.active; - if (crtc_state->base.active) + if (crtc_state->hw.active) dev_priv->active_pipes |= BIT(crtc->pipe); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, - enableddisabled(crtc_state->base.active)); + enableddisabled(crtc_state->hw.active)); } readout_plane_state(dev_priv); @@ -17150,7 +17655,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - if (crtc_state->base.active && + if (crtc_state->hw.active && crtc_state->shared_dpll == pll) pll->state.crtc_mask |= 1 << crtc->pipe; } @@ -17195,15 +17700,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc = to_intel_crtc(encoder->base.crtc); crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; - if (crtc_state && crtc_state->base.active) { + if (crtc_state && crtc_state->hw.active) { /* * This has to be done during hardware readout * because anything calling .crtc_disable may * rely on the connector_mask being accurate. 
*/ - crtc_state->base.connector_mask |= + crtc_state->uapi.connector_mask |= drm_connector_mask(&connector->base); - crtc_state->base.encoder_mask |= + crtc_state->uapi.encoder_mask |= drm_encoder_mask(&encoder->base); } } else { @@ -17224,13 +17729,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_plane *plane; int min_cdclk = 0; - memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); - if (crtc_state->base.active) { - intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); - crtc->base.mode.hdisplay = crtc_state->pipe_src_w; - crtc->base.mode.vdisplay = crtc_state->pipe_src_h; - intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); - WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &crtc->base.mode)); + if (crtc_state->hw.active) { + struct drm_display_mode *mode = &crtc_state->hw.mode; + + intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode, + crtc_state); + + *mode = crtc_state->hw.adjusted_mode; + mode->hdisplay = crtc_state->pipe_src_w; + mode->vdisplay = crtc_state->pipe_src_h; /* * The initial mode needs to be set in order to keep @@ -17241,20 +17748,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) * set a flag to indicate that a full recalculation is * needed on the next commit. */ - crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED; + mode->private_flags = I915_MODE_FLAG_INHERITED; intel_crtc_compute_pixel_rate(crtc_state); - min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); - if (WARN_ON(min_cdclk < 0)) - min_cdclk = 0; - intel_crtc_update_active_timings(crtc_state); - } - dev_priv->min_cdclk[crtc->pipe] = min_cdclk; - dev_priv->min_voltage_level[crtc->pipe] = - crtc_state->min_voltage_level; + intel_crtc_copy_hw_to_uapi_state(crtc_state); + } for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { const struct intel_plane_state *plane_state = @@ -17264,11 +17765,37 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) * FIXME don't have the fb yet, so can't * use intel_plane_data_rate() :( */ - if (plane_state->base.visible) + if (plane_state->uapi.visible) crtc_state->data_rate[plane->id] = 4 * crtc_state->pixel_rate; + /* + * FIXME don't have the fb yet, so can't + * use plane->min_cdclk() :( + */ + if (plane_state->uapi.visible && plane->min_cdclk) { + if (crtc_state->double_wide || + INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + crtc_state->min_cdclk[plane->id] = + DIV_ROUND_UP(crtc_state->pixel_rate, 2); + else + crtc_state->min_cdclk[plane->id] = + crtc_state->pixel_rate; + } + DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n", + plane->base.base.id, plane->base.name, + crtc_state->min_cdclk[plane->id]); } + if (crtc_state->hw.active) { + min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); + if (WARN_ON(min_cdclk < 0)) + min_cdclk = 0; + } + + dev_priv->min_cdclk[crtc->pipe] = min_cdclk; + dev_priv->min_voltage_level[crtc->pipe] = + crtc_state->min_voltage_level; + intel_bw_crtc_update(bw_state, crtc_state); intel_pipe_config_sanity_check(dev_priv, crtc_state); @@ -17382,7 +17909,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *crtc_state; struct intel_encoder *encoder; struct intel_crtc *crtc; intel_wakeref_t wakeref; @@ -17415,11 +17941,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev, * waits, so we need vblank interrupts restored beforehand. 
*/ for_each_intel_crtc(&dev_priv->drm, crtc) { - crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); drm_crtc_vblank_reset(&crtc->base); - if (crtc_state->base.active) + if (crtc_state->hw.active) intel_crtc_vblank_on(crtc_state); } @@ -17429,7 +17956,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_sanitize_encoder(encoder); for_each_intel_crtc(&dev_priv->drm, crtc) { - crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + intel_sanitize_crtc(crtc, ctx); intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); } @@ -17462,17 +17991,16 @@ intel_modeset_setup_hw_state(struct drm_device *dev, } for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); u64 put_domains; - crtc_state = to_intel_crtc_state(crtc->base.state); put_domains = modeset_get_crtc_power_domains(crtc_state); if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); - - intel_fbc_init_pipe_state(dev_priv); } void intel_display_resume(struct drm_device *dev) @@ -17548,6 +18076,13 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915) */ intel_hpd_poll_fini(i915); + /* + * MST topology needs to be suspended so we don't have any calls to + * fbdev after it's finalized. MST will be destroyed later as part of + * drm_mode_config_cleanup() + */ + intel_dp_mst_suspend(i915); + /* poll work can call into fbdev, hence clean that up afterwards */ intel_fbdev_fini(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 7dcb176d91b0..0fef9263cddc 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -27,7 +27,6 @@ #include <drm/drm_util.h> #include <drm/i915_drm.h> -#include "intel_dp_link_training.h" enum link_m_n_set; struct dpll; @@ -333,8 +332,11 @@ enum phy_fia { (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \ (__s)++) -#define for_each_port_masked(__port, __ports_mask) \ - for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ +#define for_each_port(__port) \ + for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) + +#define for_each_port_masked(__port, __ports_mask) \ + for_each_port(__port) \ for_each_if((__ports_mask) & BIT(__port)) #define for_each_phy_masked(__phy, __phys_mask) \ @@ -378,6 +380,13 @@ enum phy_fia { &(dev)->mode_config.encoder_list, \ base.head) +#define for_each_intel_encoder_mask(dev, intel_encoder, encoder_mask) \ + list_for_each_entry(intel_encoder, \ + &(dev)->mode_config.encoder_list, \ + base.head) \ + for_each_if((encoder_mask) & \ + drm_encoder_mask(&intel_encoder->base)) + #define for_each_intel_dp(dev, intel_encoder) \ for_each_intel_encoder(dev, intel_encoder) \ for_each_if(intel_encoder_is_dp(intel_encoder)) @@ -447,10 +456,18 @@ enum phy_fia { #define intel_atomic_crtc_state_for_each_plane_state( \ plane, plane_state, \ crtc_state) \ - for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \ - ((crtc_state)->base.plane_mask)) \ + for_each_intel_plane_mask(((crtc_state)->uapi.state->dev), (plane), \ + ((crtc_state)->uapi.plane_mask)) \ for_each_if ((plane_state = \ - to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base)))) + 
to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->uapi.state, &plane->base)))) + +#define for_each_new_intel_connector_in_state(__state, connector, new_connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->base.num_connector; \ + (__i)++) \ + for_each_if ((__state)->base.connectors[__i].ptr && \ + ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ + (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, @@ -468,6 +485,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); void intel_plane_destroy(struct drm_plane *plane); +void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state); void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); @@ -500,16 +518,14 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe); u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); +void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, unsigned int expected_mask); int intel_get_load_detect_pipe(struct drm_connector *connector, - const struct drm_display_mode *mode, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, @@ -549,7 +565,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); -bool intel_crtc_active(struct intel_crtc *crtc); bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); void hsw_enable_ips(const struct intel_crtc_state *crtc_state); void hsw_disable_ips(const struct intel_crtc_state *crtc_state); @@ -563,8 +578,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); -int skl_max_scale(const struct intel_crtc_state *crtc_state, - const struct drm_format_info *format); +void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state); +void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); @@ -586,6 +601,10 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv); void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct intel_display_error_state *error); +bool +intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + uint64_t modifier); + /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init(struct drm_i915_private *i915); @@ -607,9 +626,10 @@ void assert_fdi_rx_pll(struct 
drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) -void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); -#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) -#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) +void assert_pipe(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, bool state); +#define assert_pipe_enabled(d, t) assert_pipe(d, t, true) +#define assert_pipe_disabled(d, t) assert_pipe(d, t, false) /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 6f9e7927e248..679457156797 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -418,7 +418,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - int wa_idx_max; + + WARN_ON(!IS_ICELAKE(dev_priv)); val = I915_READ(regs->driver); I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); @@ -430,14 +431,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_enable(dev_priv, power_well); - /* Display WA #1178: icl, tgl */ - if (IS_TIGERLAKE(dev_priv)) - wa_idx_max = ICL_PW_CTL_IDX_AUX_C; - else - wa_idx_max = ICL_PW_CTL_IDX_AUX_B; - - if (!IS_ELKHARTLAKE(dev_priv) && - pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max && + /* Display WA #1178: icl */ + if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; @@ -454,10 +449,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - if (INTEL_GEN(dev_priv) < 12) { - val = I915_READ(ICL_PORT_CL_DW12(phy)); - I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); - } + WARN_ON(!IS_ICELAKE(dev_priv)); + + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); val = I915_READ(regs->driver); I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); @@ -2682,6 +2677,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, TGL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ @@ -3686,6 +3683,151 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; +static const struct i915_power_well_desc ehl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id 
= SKL_DISP_DC_OFF, + }, + { + .name = "power well 2", + .domains = ICL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, + { + .name = "power well 3", + .domains = ICL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + }, + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + }, + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + }, + }, + { + .name = "DDI D IO", + .domains = ICL_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_D, + }, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, + }, + { + .name = "AUX D", + .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + }, + }, + { + .name = "power well 4", + .domains = ICL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, + }, +}; + static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "always-on", @@ -3830,7 +3972,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3840,7 +3982,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX B", .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -3850,7 +3992,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX C", .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, + .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4160,6 +4302,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ if (IS_GEN(dev_priv, 12)) { err = 
set_power_wells(power_domains, tgl_power_wells); + } else if (IS_ELKHARTLAKE(dev_priv)) { + err = set_power_wells(power_domains, ehl_power_wells); } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { @@ -4779,6 +4923,56 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) intel_combo_phy_uninit(dev_priv); } +struct buddy_page_mask { + u32 page_mask; + u8 type; + u8 num_channels; +}; + +static const struct buddy_page_mask tgl_buddy_page_masks[] = { + { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE }, + { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, + { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, + { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, + {} +}; + +static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { + { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, + { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, + { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, + { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, + {} +}; + +static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) +{ + enum intel_dram_type type = dev_priv->dram_info.type; + u8 num_channels = dev_priv->dram_info.num_channels; + const struct buddy_page_mask *table; + int i; + + if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) + /* Wa_1409767108: tgl */ + table = wa_1409767108_buddy_page_masks; + else + table = tgl_buddy_page_masks; + + for (i = 0; table[i].page_mask != 0; i++) + if (table[i].num_channels == num_channels && + table[i].type == type) + break; + + if (table[i].page_mask == 0) { + DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n"); + I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE); + I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE); + } else { + I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask); + I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask); + } +} + static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { @@ -4811,6 +5005,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, /* 6. Setup MBUS. */ icl_mbus_init(dev_priv); + /* 7. 
Program arbiter BW_BUDDY registers */ + if (INTEL_GEN(dev_priv) >= 12) + tgl_bw_buddy_init(dev_priv); + if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); } @@ -5013,6 +5211,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) power_domains->initializing = true; + /* Must happen before power domain init on VLV/CHV */ + intel_update_rawclk(i915); + if (INTEL_GEN(i915) >= 11) { icl_display_core_init(i915, resume); } else if (IS_CANNONLAKE(i915)) { diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 1da04f3e0fb3..2608a65af7fa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -28,7 +28,7 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_D, POWER_DOMAIN_TRANSCODER_EDP, - /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */ + /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */ POWER_DOMAIN_TRANSCODER_VDSC_PW2, POWER_DOMAIN_TRANSCODER_DSI_A, POWER_DOMAIN_TRANSCODER_DSI_C, diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 8358152e403e..83ea04149b77 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -128,7 +128,8 @@ struct intel_encoder { enum intel_output_type type; enum port port; - unsigned int cloneable; + u16 cloneable; + u8 pipe_mask; enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder, struct intel_connector *connector, bool irq_received); @@ -187,7 +188,6 @@ struct intel_encoder { * device interrupts are disabled. */ void (*suspend)(struct intel_encoder *); - int crtc_mask; enum hpd_pin hpd_pin; enum intel_display_power_domain power_domain; /* for communication with audio component; protected by av_mutex */ @@ -506,6 +506,14 @@ struct intel_atomic_state { bool rps_interactive; + /* + * active_pipes + * min_cdclk[] + * min_voltage_level[] + * cdclk.* + */ + bool global_state_changed; + /* Gen9+ only */ struct skl_ddb_values wm_results; @@ -515,7 +523,24 @@ struct intel_atomic_state { }; struct intel_plane_state { - struct drm_plane_state base; + struct drm_plane_state uapi; + + /* + * actual hardware state, the state we program to the hardware. + * The following members are used to verify the hardware state: + * During initial hw readout, they need to be copied from uapi. + */ + struct { + struct drm_crtc *crtc; + struct drm_framebuffer *fb; + + u16 alpha; + uint16_t pixel_blend_mode; + unsigned int rotation; + enum drm_color_encoding color_encoding; + enum drm_color_range color_range; + } hw; + struct i915_ggtt_view view; struct i915_vma *vma; unsigned long flags; @@ -538,6 +563,9 @@ struct intel_plane_state { /* plane color control register */ u32 color_ctl; + /* chroma upsampler control register */ + u32 cus_ctl; + /* * scaler_id * = -1 : not using a scaler @@ -749,7 +777,33 @@ enum intel_output_format { }; struct intel_crtc_state { - struct drm_crtc_state base; + /* + * uapi (drm) state. This is the software state shown to userspace. + * In particular, the following members are used for bookkeeping: + * - crtc + * - state + * - *_changed + * - event + * - commit + * - mode_blob + */ + struct drm_crtc_state uapi; + + /* + * actual hardware state, the state we program to the hardware. 
+ * The following members are used to verify the hardware state: + * - enable + * - active + * - mode / adjusted_mode + * - color property blobs. + * + * During initial hw readout, they need to be copied to uapi. + */ + struct { + bool active, enable; + struct drm_property_blob *degamma_lut, *gamma_lut, *ctm; + struct drm_display_mode mode, adjusted_mode; + } hw; /** * quirks - bitfield with hw state readout quirks @@ -767,6 +821,7 @@ struct intel_crtc_state { bool disable_cxsr; bool update_wm_pre, update_wm_post; /* watermarks are updated */ bool fifo_changed; /* FIFO split is changed */ + bool preload_luts; /* Pipe source size (ie. panel fitter input size) * All planes will be positioned inside this space, @@ -932,6 +987,8 @@ struct intel_crtc_state { struct intel_crtc_wm_state wm; + int min_cdclk[I915_MAX_PLANES]; + u32 data_rate[I915_MAX_PLANES]; /* Gamma mode programmed on the pipe */ @@ -986,8 +1043,8 @@ struct intel_crtc_state { bool dsc_split; u16 compressed_bpp; u8 slice_count; - } dsc_params; - struct drm_dsc_config dp_dsc_cfg; + struct drm_dsc_config config; + } dsc; /* Forward Error correction State */ bool fec_enable; @@ -1069,14 +1126,13 @@ struct intel_plane { void (*update_plane)(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*update_slave)(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); void (*disable_plane)(struct intel_plane *plane, const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); + int (*min_cdclk)(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); }; struct intel_watermark_params { @@ -1100,12 +1156,12 @@ struct cxsr_latency { #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) -#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base) +#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi) #define to_intel_connector(x) container_of(x, struct intel_connector, base) #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) -#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base) +#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi) #define intel_fb_obj(x) ((x) ? 
to_intel_bo((x)->obj[0]) : NULL) struct intel_hdmi { @@ -1515,6 +1571,24 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state, &crtc->base)); } +static inline struct intel_digital_connector_state * +intel_atomic_get_new_connector_state(struct intel_atomic_state *state, + struct intel_connector *connector) +{ + return to_intel_digital_connector_state( + drm_atomic_get_new_connector_state(&state->base, + &connector->base)); +} + +static inline struct intel_digital_connector_state * +intel_atomic_get_old_connector_state(struct intel_atomic_state *state, + struct intel_connector *connector) +{ + return to_intel_digital_connector_state( + drm_atomic_get_old_connector_state(&state->base, + &connector->base)); +} + /* intel_display.c */ static inline bool intel_crtc_has_type(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index aa515261cb9f..2f31d226c6eb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1179,18 +1179,20 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); + const unsigned int timeout_ms = 10; u32 status; bool done; #define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) done = wait_event_timeout(i915->gmbus_wait_queue, C, - msecs_to_jiffies_timeout(10)); + msecs_to_jiffies_timeout(timeout_ms)); /* just trace the final value */ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if (!done) - DRM_ERROR("dp aux hw did not signal timeout!\n"); + DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n", + intel_dp->aux.name, timeout_ms, status); #undef C return status; @@ -1291,6 +1293,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, u32 unused) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = + to_i915(intel_dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); u32 ret; ret = DP_AUX_CH_CTL_SEND_BUSY | @@ -1303,7 +1308,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); - if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) + if (intel_phy_is_tc(i915, phy) && + intel_dig_port->tc_mode == TC_PORT_TBT_ALT) ret |= DP_AUX_CH_CTL_TBT_IO; return ret; @@ -1883,29 +1889,15 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp, drm_dp_sink_supports_fec(intel_dp->fec_capable); } -static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - /* On TGL, DSC is supported on all Pipes */ - if (INTEL_GEN(dev_priv) >= 12) - return true; - - if (INTEL_GEN(dev_priv) >= 10 && - pipe_config->cpu_transcoder != TRANSCODER_A) - return true; - - return false; -} - static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, - const struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *crtc_state) { - if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable) + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) return false; - return intel_dp_source_supports_dsc(intel_dp, pipe_config) && + return intel_dsc_source_support(encoder, crtc_state) && 
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); } @@ -1990,7 +1982,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, const struct link_config_limits *limits) { - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int bpp, clock, lane_count; int mode_rate, link_clock, link_avail; @@ -2037,6 +2029,63 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) return 0; } +#define DSC_SUPPORTED_VERSION_MIN 1 + +static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + u8 line_buf_depth; + int ret; + + ret = intel_dsc_compute_params(encoder, crtc_state); + if (ret) + return ret; + + /* + * Slice Height of 8 works for all currently available panels. So start + * with that if pic_height is an integral multiple of 8. Eventually add + * logic to try multiple slice heights. + */ + if (vdsc_cfg->pic_height % 8 == 0) + vdsc_cfg->slice_height = 8; + else if (vdsc_cfg->pic_height % 4 == 0) + vdsc_cfg->slice_height = 4; + else + vdsc_cfg->slice_height = 2; + + vdsc_cfg->dsc_version_major = + (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & + DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; + vdsc_cfg->dsc_version_minor = + min(DSC_SUPPORTED_VERSION_MIN, + (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & + DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); + + vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & + DP_DSC_RGB; + + line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); + if (!line_buf_depth) { + DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); + return -EINVAL; + } + + if (vdsc_cfg->dsc_version_minor == 2) + vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? + DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; + else + vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; + + vdsc_cfg->block_pred_enable = + intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_BLK_PREDICTION_IS_SUPPORTED; + + return drm_dsc_compute_rc_parameters(vdsc_cfg); +} + static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state, @@ -2044,7 +2093,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u8 dsc_max_bpc; int pipe_bpp; int ret; @@ -2080,10 +2129,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->lane_count = limits->max_lane_count; if (intel_dp_is_edp(intel_dp)) { - pipe_config->dsc_params.compressed_bpp = + pipe_config->dsc.compressed_bpp = min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, pipe_config->pipe_bpp); - pipe_config->dsc_params.slice_count = + pipe_config->dsc.slice_count = drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, true); } else { @@ -2104,10 +2153,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); return -EINVAL; } - pipe_config->dsc_params.compressed_bpp = min_t(u16, + pipe_config->dsc.compressed_bpp = min_t(u16, dsc_max_output_bpp >> 4, pipe_config->pipe_bpp); - pipe_config->dsc_params.slice_count = dsc_dp_slice_count; + pipe_config->dsc.slice_count = dsc_dp_slice_count; } /* * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate @@ -2115,29 +2164,29 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, * then we need to use 2 VDSC instances. 
*/ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { - if (pipe_config->dsc_params.slice_count > 1) { - pipe_config->dsc_params.dsc_split = true; + if (pipe_config->dsc.slice_count > 1) { + pipe_config->dsc.dsc_split = true; } else { DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); return -EINVAL; } } - ret = intel_dp_compute_dsc_params(intel_dp, pipe_config); + ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); if (ret < 0) { DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " "Compressed BPP = %d\n", pipe_config->pipe_bpp, - pipe_config->dsc_params.compressed_bpp); + pipe_config->dsc.compressed_bpp); return ret; } - pipe_config->dsc_params.compression_enable = true; + pipe_config->dsc.compression_enable = true; DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " "Compressed Bpp = %d Slice Count = %d\n", pipe_config->pipe_bpp, - pipe_config->dsc_params.compressed_bpp, - pipe_config->dsc_params.slice_count); + pipe_config->dsc.compressed_bpp, + pipe_config->dsc.slice_count); return 0; } @@ -2155,7 +2204,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct link_config_limits limits; int common_len; @@ -2211,15 +2260,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, return ret; } - if (pipe_config->dsc_params.compression_enable) { + if (pipe_config->dsc.compression_enable) { DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp, - pipe_config->dsc_params.compressed_bpp); + pipe_config->dsc.compressed_bpp); DRM_DEBUG_KMS("DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->dsc_params.compressed_bpp), + pipe_config->dsc.compressed_bpp), intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } else { @@ -2243,8 +2292,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp, { const struct drm_display_info *info = &connector->display_info; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + &crtc_state->hw.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); int ret; if (!drm_mode_is_420_only(info, adjusted_mode) || @@ -2272,7 +2321,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; /* * Our YCbCr output is always limited range. 
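
[Annotation, not part of the patch.] The dsc_split rule in the hunk above is worth spelling out: the VDSC engine processes one pixel per clock, so a mode whose pixel rate exceeds the maximum CDCLK can only be compressed by splitting the stream across two engines, and that in turn requires at least two DSC slices. A minimal standalone sketch of that decision follows; the helper name and simplified integer types are hypothetical and not part of the i915 sources.

static int vdsc_engines_needed(int crtc_clock_khz, int max_cdclk_khz,
			       int slice_count)
{
	/* One engine keeps up as long as the pixel rate fits in CDCLK. */
	if (crtc_clock_khz <= max_cdclk_khz)
		return 1;

	/* Otherwise the stream must be split, which needs >= 2 slices. */
	if (slice_count > 1)
		return 2;

	return -1; /* cannot split; mirrors the -EINVAL path above */
}
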
@@ -2299,17 +2348,28 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, } } +static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, + enum port port) +{ + if (IS_G4X(dev_priv)) + return false; + if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) + return false; + + return true; +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); enum port port = encoder->port; - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); @@ -2332,7 +2392,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, return ret; pipe_config->has_drrs = false; - if (IS_G4X(dev_priv) || port == PORT_A) + if (!intel_dp_port_has_audio(dev_priv, port)) pipe_config->has_audio = false; else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) pipe_config->has_audio = intel_dp->has_audio; @@ -2377,8 +2437,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->limited_color_range = intel_dp_limited_color_range(pipe_config, conn_state); - if (pipe_config->dsc_params.compression_enable) - output_bpp = pipe_config->dsc_params.compressed_bpp; + if (pipe_config->dsc.compression_enable) + output_bpp = pipe_config->dsc.compressed_bpp; else output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); @@ -2405,9 +2465,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config); - intel_hdcp_transcoder_config(intel_connector, - pipe_config->cpu_transcoder); - return 0; } @@ -2427,8 +2484,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; intel_dp_set_link_params(intel_dp, pipe_config->port_clock, pipe_config->lane_count, @@ -3025,7 +3082,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) static void ironlake_edp_pll_on(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); assert_pipe_disabled(dev_priv, crtc->pipe); @@ -3065,7 +3122,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp, static void ironlake_edp_pll_off(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = 
to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); assert_pipe_disabled(dev_priv, crtc->pipe); @@ -3102,7 +3159,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, { int ret; - if (!crtc_state->dsc_params.compression_enable) + if (!crtc_state->dsc.compression_enable) return; ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, @@ -3225,7 +3282,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); u32 tmp, flags = 0; enum port port = encoder->port; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); if (encoder->type == INTEL_OUTPUT_EDP) pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); @@ -3260,7 +3317,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, flags |= DRM_MODE_FLAG_NVSYNC; } - pipe_config->base.adjusted_mode.flags |= flags; + pipe_config->hw.adjusted_mode.flags |= flags; if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; @@ -3277,7 +3334,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->port_clock = 270000; } - pipe_config->base.adjusted_mode.crtc_clock = + pipe_config->hw.adjusted_mode.crtc_clock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); @@ -3492,7 +3549,7 @@ static void intel_enable_dp(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); u32 dp_reg = I915_READ(intel_dp->output_reg); enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; @@ -3625,7 +3682,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); lockdep_assert_held(&dev_priv->pps_mutex); @@ -4147,7 +4204,7 @@ intel_dp_link_down(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; u32 DP = intel_dp->DP; @@ -5070,7 +5127,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, WARN_ON(!intel_crtc_has_dp_encoder(crtc_state)); - if (!crtc_state->base.active) + if (!crtc_state->hw.active) return 0; if (conn_state->commit && @@ -5467,25 +5524,23 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder) return I915_READ(GEN8_DE_PORT_ISR) & bit; } -static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *intel_dig_port) +static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, + enum phy phy) { - enum port port = intel_dig_port->base.port; - - if (HAS_PCH_MCC(dev_priv) && port == PORT_C) + if (HAS_PCH_MCC(dev_priv) && phy == PHY_C) return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); - return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); + return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy); } -static 
bool icl_digital_port_connected(struct intel_encoder *encoder) +static bool icp_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) - return icl_combo_port_connected(dev_priv, dig_port); + return intel_combo_phy_connected(dev_priv, phy); else if (intel_phy_is_tc(dev_priv, phy)) return intel_tc_port_connected(dig_port); else @@ -5516,9 +5571,9 @@ static bool __intel_digital_port_connected(struct intel_encoder *encoder) return g4x_digital_port_connected(encoder); } - if (INTEL_GEN(dev_priv) >= 11) - return icl_digital_port_connected(encoder); - else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + return icp_digital_port_connected(encoder); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) return spt_digital_port_connected(encoder); else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(encoder); @@ -5688,6 +5743,12 @@ out: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); + /* + * Make sure the refs for power wells enabled during detect are + * dropped to avoid a new detect cycle triggered by HPD polling. + */ + intel_display_power_flush_work(dev_priv); + return status; } @@ -6899,7 +6960,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, int refresh_rate) { struct intel_dp *intel_dp = dev_priv->drrs.dp; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); enum drrs_refresh_rate_type index = DRRS_HIGH_RR; if (refresh_rate <= 0) { @@ -6932,7 +6993,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - if (!crtc_state->base.active) { + if (!crtc_state->hw.active) { DRM_DEBUG_KMS("eDP encoder disabled. 
CRTC not Active\n"); return; } @@ -7560,11 +7621,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->power_domain = intel_port_to_power_domain(port); if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) - intel_encoder->crtc_mask = BIT(PIPE_C); + intel_encoder->pipe_mask = BIT(PIPE_C); else - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B); + intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); } else { - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 0; intel_encoder->port = port; @@ -7625,7 +7686,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) if (!intel_dp->can_mst) continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, + true); if (ret) { intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index bbcab27644dc..7aa0975c33b7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -42,13 +42,13 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state, struct link_config_limits *limits) { - struct drm_atomic_state *state = crtc_state->base.state; + struct drm_atomic_state *state = crtc_state->uapi.state; struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_dp *intel_dp = &intel_mst->primary->dp; struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; void *port = connector->port; bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); @@ -99,7 +99,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; void *port = connector->port; struct link_config_limits limits; int ret; @@ -168,7 +168,6 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_crtc *new_crtc = new_conn_state->crtc; - struct drm_crtc_state *crtc_state; struct drm_dp_mst_topology_mgr *mgr; int ret; @@ -183,11 +182,16 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, * connector */ if (new_crtc) { - crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); + struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(intel_state, + intel_crtc); if (!crtc_state || - !drm_atomic_crtc_needs_modeset(crtc_state) || - crtc_state->enable) + !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) || + crtc_state->uapi.enable) return 0; } @@ -231,8 +235,32 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + bool last_mst_stream; - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_dp->active_mst_links--; + 
last_mst_stream = intel_dp->active_mst_links == 0; + + intel_crtc_vblank_off(old_crtc_state); + + intel_disable_pipe(old_crtc_state); + + intel_ddi_disable_transcoder_func(old_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9) + skylake_scaler_disable(old_crtc_state); + else + ironlake_pfit_disable(old_crtc_state); + + /* + * From TGL spec: "If multi-stream slave transcoder: Configure + * Transcoder Clock Select to direct no clock to the transcoder" + * + * From older GENs spec: "Configure Transcoder Clock Select to direct + * no clock to the transcoder" + */ + if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream) + intel_ddi_disable_pipe_clock(old_crtc_state); /* this can fail */ drm_dp_check_act_status(&intel_dp->mst_mgr); @@ -248,14 +276,10 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, false); - intel_dp->active_mst_links--; - intel_mst->connector = NULL; - if (intel_dp->active_mst_links == 0) { - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + if (last_mst_stream) intel_dig_port->base.post_disable(&intel_dig_port->base, old_crtc_state, NULL); - } DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); } @@ -273,20 +297,6 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, pipe_config, NULL); } -static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; - - if (intel_dp->active_mst_links == 0) - intel_dig_port->base.post_pll_disable(&intel_dig_port->base, - old_crtc_state, - old_conn_state); -} - static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) @@ -299,21 +309,23 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, to_intel_connector(conn_state->connector); int ret; u32 temp; + bool first_mst_stream; /* MST encoders are bound to a crtc, not to a connector, * force the mapping here for get_hw_state. */ connector->encoder = encoder; intel_mst->connector = connector; + first_mst_stream = intel_dp->active_mst_links == 0; DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); - if (intel_dp->active_mst_links == 0) + if (first_mst_stream) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); - if (intel_dp->active_mst_links == 0) + if (first_mst_stream) intel_dig_port->base.pre_enable(&intel_dig_port->base, pipe_config, NULL); @@ -330,7 +342,17 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); - intel_ddi_enable_pipe_clock(pipe_config); + /* + * Before Gen 12 this is not done as part of + * intel_dig_port->base.pre_enable() and should be done here. For + * Gen 12+ the step in which this should be done is different for the + * first MST stream, so it's done on the DDI for the first stream and + * here for the following ones. 
+ */ + if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream) + intel_ddi_enable_pipe_clock(pipe_config); + + intel_ddi_set_dp_msa(pipe_config, conn_state); } static void intel_mst_enable_dp(struct intel_encoder *encoder, @@ -391,20 +413,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) return ret; } -static enum drm_connector_status -intel_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_dp *intel_dp = intel_connector->mst_port; - - if (drm_connector_is_unregistered(connector)) - return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, - intel_connector->port); -} - static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { - .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, @@ -465,11 +474,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c return &intel_dp->mst_encoders[crtc->pipe]->base.base; } +static int +intel_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + if (drm_connector_is_unregistered(connector)) + return connector_status_disconnected; + + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, + intel_connector->port); +} + static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, .atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_check = intel_dp_mst_atomic_check, + .detect_ctx = intel_dp_mst_detect, }; static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) @@ -598,8 +622,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum struct intel_dp_mst_encoder *intel_mst; struct intel_encoder *intel_encoder; struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe_iter; intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); @@ -617,14 +639,20 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_encoder->power_domain = intel_dig_port->base.power_domain; intel_encoder->port = intel_dig_port->base.port; intel_encoder->cloneable = 0; - for_each_pipe(dev_priv, pipe_iter) - intel_encoder->crtc_mask |= BIT(pipe_iter); + /* + * This is wrong, but broken userspace uses the intersection + * of possible_crtcs of all the encoders of a given connector + * to figure out which crtcs can drive said connector. What + * should be used instead is the union of possible_crtcs. 
+ * To keep such userspace functioning we must misconfigure + * this to make sure the intersection is not empty :( + */ + intel_encoder->pipe_mask = ~0; intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; - intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; intel_encoder->enable = intel_mst_enable_dp; intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 556d1b30f06a..704f38681c4b 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 val; @@ -783,7 +783,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); enum pipe pipe = crtc->pipe; unsigned int lane_mask = @@ -864,7 +864,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dport_to_channel(dport); enum pipe pipe = crtc->pipe; int data, i, stagger; @@ -953,7 +953,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe; u32 val; vlv_dpio_get(dev_priv); @@ -1016,7 +1016,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); enum pipe pipe = crtc->pipe; @@ -1046,7 +1046,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); enum pipe pipe = crtc->pipe; u32 val; @@ -1075,7 +1075,7 @@ void vlv_phy_reset_lanes(struct 
intel_encoder *encoder, { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum dpio_channel port = vlv_dport_to_channel(dport); enum pipe pipe = crtc->pipe; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index ec10fa7d3c69..728a4b045de7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -136,7 +136,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, */ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; @@ -163,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) */ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int crtc_mask = drm_crtc_mask(&crtc->base); @@ -208,7 +208,7 @@ out: */ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int crtc_mask = drm_crtc_mask(&crtc->base); @@ -526,16 +526,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, val = I915_READ(WRPLL_CTL(id)); I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); POSTING_READ(WRPLL_CTL(id)); + + /* + * Try to set up the PCH reference clock once all DPLLs + * that depend on it have been shut down. + */ + if (dev_priv->pch_ssc_use & BIT(id)) + intel_init_pch_refclk(dev_priv); } static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + enum intel_dpll_id id = pll->info->id; u32 val; val = I915_READ(SPLL_CTL); I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); POSTING_READ(SPLL_CTL); + + /* + * Try to set up the PCH reference clock once all DPLLs + * that depend on it have been shut down. 
+ */ + if (dev_priv->pch_ssc_use & BIT(id)) + intel_init_pch_refclk(dev_priv); } static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, @@ -827,7 +842,7 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, static struct intel_shared_dpll * hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; int clock = crtc_state->port_clock; @@ -1736,7 +1751,7 @@ static bool bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct bxt_clk_div *clk_div) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct dpll best_clock; /* Calculate HDMI div */ @@ -2259,7 +2274,7 @@ static bool cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *wrpll_params) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 afe_clock = crtc_state->port_clock * 5; u32 ref_clock; u32 dco_min = 7998000; @@ -2538,7 +2553,7 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = dev_priv->cdclk.hw.ref == 24000 ? icl_dp_combo_pll_24MHz_values : @@ -2560,7 +2575,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (INTEL_GEN(dev_priv) >= 12) { switch (dev_priv->cdclk.hw.ref) { @@ -2597,7 +2612,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder, struct intel_dpll_hw_state *pll_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 cfgcr0, cfgcr1; struct skl_wrpll_params pll_params = { 0 }; bool ret; @@ -2729,7 +2744,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int refclk_khz = dev_priv->cdclk.hw.ref; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index e7588799fce5..2a104c64291d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -147,11 +147,11 @@ enum intel_dpll_id { */ DPLL_ID_ICL_MGPLL4 = 6, /** - * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5) + * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5) */ DPLL_ID_TGL_MGPLL5 = 7, /** - * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6) + * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6) */ 
DPLL_ID_TGL_MGPLL6 = 8, }; @@ -337,6 +337,11 @@ struct intel_shared_dpll { * @info: platform specific info */ const struct dpll_info *info; + + /** + * @wakeref: In some platforms a device-level runtime pm reference may + * need to be grabbed to disable DC states while this DPLL is enabled + */ intel_wakeref_t wakeref; }; diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index bb5a0e91b370..ada006a690df 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -102,43 +102,50 @@ intel_dsb_get(struct intel_crtc *crtc) struct intel_dsb *dsb = &crtc->dsb; struct drm_i915_gem_object *obj; struct i915_vma *vma; + u32 *buf; intel_wakeref_t wakeref; if (!HAS_DSB(i915)) return dsb; - if (atomic_add_return(1, &dsb->refcount) != 1) + if (dsb->refcount++ != 0) return dsb; - dsb->id = DSB1; wakeref = intel_runtime_pm_get(&i915->runtime_pm); obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE); if (IS_ERR(obj)) { DRM_ERROR("Gem object creation failed\n"); - goto err; + goto out; } - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (IS_ERR(vma)) { DRM_ERROR("Vma creation failed\n"); i915_gem_object_put(obj); - atomic_dec(&dsb->refcount); - goto err; + goto out; } - dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); - if (IS_ERR(dsb->cmd_buf)) { + buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); + if (IS_ERR(buf)) { DRM_ERROR("Command buffer creation failed\n"); - i915_vma_unpin_and_release(&vma, 0); - dsb->cmd_buf = NULL; - atomic_dec(&dsb->refcount); - goto err; + goto out; } + + dsb->id = DSB1; dsb->vma = vma; + dsb->cmd_buf = buf; + +out: + /* + * On error dsb->cmd_buf will continue to be NULL, making the writes + * pass-through. Leave the dangling ref to be removed later by the + * corresponding intel_dsb_put(): the important error message will + * already be logged above. 
+ */ -err: intel_runtime_pm_put(&i915->runtime_pm, wakeref); + return dsb; } @@ -158,10 +165,10 @@ void intel_dsb_put(struct intel_dsb *dsb) if (!HAS_DSB(i915)) return; - if (WARN_ON(atomic_read(&dsb->refcount) == 0)) + if (WARN_ON(dsb->refcount == 0)) return; - if (atomic_dec_and_test(&dsb->refcount)) { + if (--dsb->refcount == 0) { i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP); dsb->cmd_buf = NULL; dsb->free_pos = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 6f95c8e909e6..395ef9ce558e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -22,7 +22,7 @@ enum dsb_id { }; struct intel_dsb { - atomic_t refcount; + long refcount; enum dsb_id id; u32 *cmd_buf; struct i915_vma *vma; diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 9827f99491d1..a74dc5b915d1 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -178,9 +178,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, else flags |= DRM_MODE_FLAG_NVSYNC; - pipe_config->base.adjusted_mode.flags |= flags; + pipe_config->hw.adjusted_mode.flags |= flags; - pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; + pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; } static void intel_disable_dvo(struct intel_encoder *encoder, @@ -207,8 +207,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder, u32 temp = I915_READ(dvo_reg); intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, - &pipe_config->base.mode, - &pipe_config->base.adjusted_mode); + &pipe_config->hw.mode, + &pipe_config->hw.adjusted_mode); I915_WRITE(dvo_reg, temp | DVO_ENABLE); I915_READ(dvo_reg); @@ -253,7 +253,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder, struct intel_dvo *intel_dvo = enc_to_dvo(encoder); const struct drm_display_mode *fixed_mode = intel_dvo->attached_connector->panel.fixed_mode; - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; /* * If we have timings from the BIOS for the panel, put them in @@ -277,8 +277,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); enum pipe pipe = crtc->pipe; u32 dvo_val; @@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv) intel_encoder->type = INTEL_OUTPUT_DVO; intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = port; - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B); + intel_encoder->pipe_mask = ~0; switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 3111ecaeabd0..a1048ece541e 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -50,11 +50,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv) return HAS_FBC(dev_priv); } -static inline bool 
no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) -{ - return INTEL_GEN(dev_priv) <= 3; -} - /* * In some platforms where the CRTC's x:0/y:0 coordinates don't match the * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's @@ -73,7 +68,7 @@ static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc) * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value * we wrote to PIPESRC. */ -static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, +static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache, int *width, int *height) { if (width) @@ -83,7 +78,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, } static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, - struct intel_fbc_state_cache *cache) + const struct intel_fbc_state_cache *cache) { int lines; @@ -143,8 +138,10 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) u32 fbc_ctl2; /* Set it up... */ - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE; I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset); } @@ -156,7 +153,8 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= params->vma->fence->id; + if (params->fence_id >= 0) + fbc_ctl |= params->fence_id; I915_WRITE(FBC_CONTROL, fbc_ctl); } @@ -176,8 +174,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - if (params->flags & PLANE_HAS_FENCE) { - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id; + if (params->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); } else { I915_WRITE(DPFC_FENCE_YOFF, 0); @@ -234,14 +232,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) break; } - if (params->flags & PLANE_HAS_FENCE) { + if (params->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN(dev_priv, 5)) - dpfc_ctl |= params->vma->fence->id; + dpfc_ctl |= params->fence_id; if (IS_GEN(dev_priv, 6)) { I915_WRITE(SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | - params->vma->fence->id); + params->fence_id); I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } @@ -253,8 +251,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) } I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); - I915_WRITE(ILK_FBC_RT_BASE, - i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -285,13 +281,12 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) int threshold = dev_priv->fbc.threshold; /* Display WA #0529: skl, kbl, bxt.
*/ - if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { u32 val = I915_READ(CHICKEN_MISC_4); val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); - if (i915_gem_object_get_tiling(params->vma->obj) != - I915_TILING_X) + if (params->gen9_wa_cfb_stride) val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; I915_WRITE(CHICKEN_MISC_4, val); @@ -317,11 +312,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) break; } - if (params->flags & PLANE_HAS_FENCE) { + if (params->fence_id >= 0) { dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; I915_WRITE(SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | - params->vma->fence->id); + params->fence_id); I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } else { I915_WRITE(SNB_DPFC_CTL_SA,0); @@ -367,6 +362,7 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) struct intel_fbc *fbc = &dev_priv->fbc; fbc->active = true; + fbc->activated = true; if (INTEL_GEN(dev_priv) >= 7) gen7_fbc_activate(dev_priv); @@ -419,29 +415,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, fbc->no_fbc_reason = reason; } -static bool multiple_pipes_ok(struct intel_crtc *crtc, - struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - enum pipe pipe = crtc->pipe; - - /* Don't even bother tracking anything we don't need. */ - if (!no_fbc_on_multiple_pipes(dev_priv)) - return true; - - if (plane_state->base.visible) - fbc->visible_pipes_mask |= (1 << pipe); - else - fbc->visible_pipes_mask &= ~(1 << pipe); - - return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0; -} - static int find_compression_threshold(struct drm_i915_private *dev_priv, struct drm_mm_node *node, - int size, - int fb_cpp) + unsigned int size, + unsigned int fb_cpp) { int compression_threshold = 1; int ret; @@ -487,18 +464,15 @@ again: } } -static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) +static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, + unsigned int size, unsigned int fb_cpp) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct drm_mm_node *uninitialized_var(compressed_llb); - int size, fb_cpp, ret; + int ret; WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); - size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache); - fb_cpp = fbc->state_cache.fb.format->cpp[0]; - ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, size, fb_cpp); if (!ret) @@ -656,46 +630,55 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) } static void intel_fbc_update_state_cache(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; - struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_framebuffer *fb = plane_state->hw.fb; - cache->vma = NULL; - cache->flags = 0; + cache->plane.visible = plane_state->uapi.visible; + if (!cache->plane.visible) + return; - cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; + cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; - 
cache->plane.rotation = plane_state->base.rotation; + cache->plane.rotation = plane_state->hw.rotation; /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; - cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; - cache->plane.visible = plane_state->base.visible; + cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; cache->plane.adjusted_x = plane_state->color_plane[0].x; cache->plane.adjusted_y = plane_state->color_plane[0].y; - cache->plane.y = plane_state->base.src.y1 >> 16; + cache->plane.y = plane_state->uapi.src.y1 >> 16; - cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode; - - if (!cache->plane.visible) - return; + cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; cache->fb.format = fb->format; cache->fb.stride = fb->pitches[0]; - cache->vma = plane_state->vma; - cache->flags = plane_state->flags; - if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence)) - cache->flags &= ~PLANE_HAS_FENCE; + WARN_ON(plane_state->flags & PLANE_HAS_FENCE && + !plane_state->vma->fence); + + if (plane_state->flags & PLANE_HAS_FENCE && + plane_state->vma->fence) + cache->fence_id = plane_state->vma->fence->id; + else + cache->fence_id = -1; +} + +static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > + fbc->compressed_fb.size * fbc->threshold; } static bool intel_fbc_can_activate(struct intel_crtc *crtc) @@ -704,6 +687,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; + if (!cache->plane.visible) { + fbc->no_fbc_reason = "primary plane not visible"; + return false; + } + /* We don't need to use a state cache here since this information is * global for all CRTC. */ @@ -712,11 +700,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - if (!cache->vma) { - fbc->no_fbc_reason = "primary plane not visible"; - return false; - } - if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { fbc->no_fbc_reason = "incompatible mode"; return false; @@ -740,7 +723,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * For now this will effectively disable FBC with 90/270 degree * rotation. */ - if (!(cache->flags & PLANE_HAS_FENCE)) { + if (cache->fence_id < 0) { fbc->no_fbc_reason = "framebuffer not tiled or fenced"; return false; } @@ -783,8 +766,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * we didn't get any invalidate/deactivate calls, but this would require * a lot of tracking just for a specific case. If we conclude it's an * important case, we can implement it later. */ - if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > - fbc->compressed_fb.size * fbc->threshold) { + if (intel_fbc_cfb_size_changed(dev_priv)) { fbc->no_fbc_reason = "CFB requirements changed"; return false; } @@ -794,7 +776,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * having a Y offset that isn't divisible by 4 causes FIFO underrun * and screen flicker.
*/ - if (IS_GEN_RANGE(dev_priv, 9, 10) && + if (INTEL_GEN(dev_priv) >= 9 && (fbc->state_cache.plane.adjusted_y & 3)) { fbc->no_fbc_reason = "plane Y offset is misaligned"; return false; @@ -837,8 +819,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, * zero. */ memset(params, 0, sizeof(*params)); - params->vma = cache->vma; - params->flags = cache->flags; + params->fence_id = cache->fence_id; params->crtc.pipe = crtc->pipe; params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; @@ -849,39 +830,88 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); - if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) - params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w, - 32 * fbc->threshold) * 8; + params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + + params->plane_visible = cache->plane.visible; +} + +static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_reg_params *params = &fbc->params; + + if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + return false; + + if (!params->plane_visible) + return false; + + if (!intel_fbc_can_activate(crtc)) + return false; + + if (params->fb.format != cache->fb.format) + return false; + + if (params->fb.stride != cache->fb.stride) + return false; + + if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + return false; + + if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + return false; + + return true; } -void intel_fbc_pre_update(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) +bool intel_fbc_pre_update(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; const char *reason = "update pending"; + bool need_vblank_wait = false; if (!fbc_supported(dev_priv)) - return; + return need_vblank_wait; mutex_lock(&fbc->lock); - if (!multiple_pipes_ok(crtc, plane_state)) { - reason = "more than one pipe active"; - goto deactivate; - } - - if (!fbc->enabled || fbc->crtc != crtc) + if (fbc->crtc != crtc) goto unlock; intel_fbc_update_state_cache(crtc, crtc_state, plane_state); fbc->flip_pending = true; -deactivate: - intel_fbc_deactivate(dev_priv, reason); + if (!intel_fbc_can_flip_nuke(crtc_state)) { + intel_fbc_deactivate(dev_priv, reason); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. 
+ */ + if (fbc->activated && + (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + need_vblank_wait = true; + fbc->activated = false; + } unlock: mutex_unlock(&fbc->lock); + + return need_vblank_wait; } /** @@ -897,14 +927,13 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) struct intel_crtc *crtc = fbc->crtc; WARN_ON(!mutex_is_locked(&fbc->lock)); - WARN_ON(!fbc->enabled); + WARN_ON(!fbc->crtc); WARN_ON(fbc->active); DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); __intel_fbc_cleanup_cfb(dev_priv); - fbc->enabled = false; fbc->crtc = NULL; } @@ -915,11 +944,10 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) WARN_ON(!mutex_is_locked(&fbc->lock)); - if (!fbc->enabled || fbc->crtc != crtc) + if (fbc->crtc != crtc) return; fbc->flip_pending = false; - WARN_ON(fbc->active); if (!i915_modparams.enable_fbc) { intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); @@ -933,10 +961,9 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) if (!intel_fbc_can_activate(crtc)) return; - if (!fbc->busy_bits) { - intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); + if (!fbc->busy_bits) intel_fbc_hw_activate(dev_priv); - } else + else intel_fbc_deactivate(dev_priv, "frontbuffer write"); } @@ -955,7 +982,7 @@ void intel_fbc_post_update(struct intel_crtc *crtc) static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) { - if (fbc->enabled) + if (fbc->crtc) return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; else return fbc->possible_framebuffer_bits; @@ -977,7 +1004,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; - if (fbc->enabled && fbc->busy_bits) + if (fbc->crtc && fbc->busy_bits) intel_fbc_deactivate(dev_priv, "frontbuffer write"); mutex_unlock(&fbc->lock); @@ -998,7 +1025,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) goto out; - if (!fbc->busy_bits && fbc->enabled && + if (!fbc->busy_bits && fbc->crtc && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) intel_fbc_recompress(dev_priv); @@ -1047,12 +1074,12 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, * to pipe or plane A. */ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); if (!plane->has_fbc) continue; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) continue; crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -1081,42 +1108,53 @@ out: * intel_fbc_disable in the middle, as long as it is deactivated. 
*/ void intel_fbc_enable(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct drm_framebuffer *fb = plane_state->hw.fb; if (!fbc_supported(dev_priv)) return; mutex_lock(&fbc->lock); - if (fbc->enabled) { - WARN_ON(fbc->crtc == NULL); - if (fbc->crtc == crtc) { - WARN_ON(!crtc_state->enable_fbc); - WARN_ON(fbc->active); - } - goto out; - } + if (fbc->crtc) { + if (fbc->crtc != crtc || + !intel_fbc_cfb_size_changed(dev_priv)) + goto out; - if (!crtc_state->enable_fbc) - goto out; + __intel_fbc_disable(dev_priv); + } WARN_ON(fbc->active); - WARN_ON(fbc->crtc != NULL); intel_fbc_update_state_cache(crtc, crtc_state, plane_state); - if (intel_fbc_alloc_cfb(crtc)) { + + /* FIXME crtc_state->enable_fbc lies :( */ + if (!cache->plane.visible) + goto out; + + if (intel_fbc_alloc_cfb(dev_priv, + intel_fbc_calculate_cfb_size(dev_priv, cache), + fb->format->cpp[0])) { + cache->plane.visible = false; fbc->no_fbc_reason = "not enough stolen memory"; goto out; } + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + fb->modifier != I915_FORMAT_MOD_X_TILED) + cache->gen9_wa_cfb_stride = + DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + cache->gen9_wa_cfb_stride = 0; + DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; - fbc->enabled = true; fbc->crtc = crtc; out: mutex_unlock(&fbc->lock); @@ -1156,7 +1194,7 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv) return; mutex_lock(&fbc->lock); - if (fbc->enabled) { + if (fbc->crtc) { WARN_ON(fbc->crtc->active); __intel_fbc_disable(dev_priv); } @@ -1172,7 +1210,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) mutex_lock(&fbc->lock); /* Maybe we were scheduled twice. */ - if (fbc->underrun_detected || !fbc->enabled) + if (fbc->underrun_detected || !fbc->crtc) goto out; DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n"); @@ -1244,28 +1282,6 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) schedule_work(&fbc->underrun_work); } -/** - * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking - * @dev_priv: i915 device instance - * - * The FBC code needs to track CRTC visibility since the older platforms can't - * have FBC enabled while multiple pipes are used. This function does the - * initial setup at driver load to make sure FBC is matching the real hardware. - */ -void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - - /* Don't even bother tracking anything if we don't need. 
*/ - if (!no_fbc_on_multiple_pipes(dev_priv)) - return; - - for_each_intel_crtc(&dev_priv->drm, crtc) - if (intel_crtc_active(crtc) && - crtc->base.primary->state->visible) - dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); -} - /* * The DDX driver changes its behavior depending on the value it reads from * i915.enable_fbc, so sanitize it by translating the default value into either @@ -1283,10 +1299,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (!HAS_FBC(dev_priv)) return 0; - /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */ - if (IS_GEMINILAKE(dev_priv)) - return 0; - if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) return 1; @@ -1317,7 +1329,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); - fbc->enabled = false; fbc->active = false; if (!drm_mm_initialized(&dev_priv->mm.stolen)) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index 50272eda8d43..c8a5e5098687 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -19,15 +19,14 @@ struct intel_plane_state; void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct intel_atomic_state *state); bool intel_fbc_is_active(struct drm_i915_private *dev_priv); -void intel_fbc_pre_update(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state); +bool intel_fbc_pre_update(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); void intel_fbc_post_update(struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); -void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); void intel_fbc_enable(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state); + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); void intel_fbc_disable(struct intel_crtc *crtc); void intel_fbc_global_disable(struct drm_i915_private *dev_priv); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 481ef97c8af1..1e98e432c9fa 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -234,6 +234,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = ggtt->gmadr.start; info->apertures->ranges[0].size = ggtt->mappable_end; + /* Our framebuffer is the entirety of fbdev's system memory */ + info->fix.smem_start = + (unsigned long)(ggtt->gmadr.start + vma->node.start); + info->fix.smem_len = vma->node.size; + vaddr = i915_vma_pin_iomap(vma); if (IS_ERR(vaddr)) { DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); @@ -243,10 +248,6 @@ static int intelfb_create(struct drm_fb_helper *helper, info->screen_base = vaddr; info->screen_size = vma->node.size; - /* Our framebuffer is the entirety of fbdev's system memory */ - info->fix.smem_start = (unsigned long)info->screen_base; - info->fix.smem_len = info->screen_size; - drm_fb_helper_fill_info(info, &ifbdev->helper, sizes); /* If the object is shmemfs backed, it will have given us zeroed pages. 
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 84b164f31895..6cb02c912acc 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref) vma->display_alignment = I915_GTT_MIN_ALIGNMENT; spin_unlock(&obj->vma.lock); - obj->frontbuffer = NULL; + RCU_INIT_POINTER(obj->frontbuffer, NULL); spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); i915_gem_object_put(obj); - kfree(front); + kfree_rcu(front, rcu); } struct intel_frontbuffer * @@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_frontbuffer *front; - spin_lock(&i915->fb_tracking.lock); - front = obj->frontbuffer; - if (front) - kref_get(&front->ref); - spin_unlock(&i915->fb_tracking.lock); + front = __intel_frontbuffer_get(obj); if (front) return front; @@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) i915_active_may_sleep(frontbuffer_retire)); spin_lock(&i915->fb_tracking.lock); - if (obj->frontbuffer) { + if (rcu_access_pointer(obj->frontbuffer)) { kfree(front); - front = obj->frontbuffer; + front = rcu_dereference_protected(obj->frontbuffer, true); kref_get(&front->ref); } else { i915_gem_object_get(obj); - obj->frontbuffer = front; + rcu_assign_pointer(obj->frontbuffer, front); } spin_unlock(&i915->fb_tracking.lock); diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index adc64d61a4a5..6d41f5394425 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -27,10 +27,10 @@ #include <linux/atomic.h> #include <linux/kref.h> +#include "gem/i915_gem_object_types.h" #include "i915_active.h" struct drm_i915_private; -struct drm_i915_gem_object; enum fb_op_origin { ORIGIN_GTT, @@ -45,6 +45,7 @@ struct intel_frontbuffer { atomic_t bits; struct i915_active write; struct drm_i915_gem_object *obj; + struct rcu_head rcu; }; void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, @@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, void intel_frontbuffer_flip(struct drm_i915_private *i915, unsigned frontbuffer_bits); +void intel_frontbuffer_put(struct intel_frontbuffer *front); + +static inline struct intel_frontbuffer * +__intel_frontbuffer_get(const struct drm_i915_gem_object *obj) +{ + struct intel_frontbuffer *front; + + if (likely(!rcu_access_pointer(obj->frontbuffer))) + return NULL; + + rcu_read_lock(); + do { + front = rcu_dereference(obj->frontbuffer); + if (!front) + break; + + if (unlikely(!kref_get_unless_zero(&front->ref))) + continue; + + if (likely(front == rcu_access_pointer(obj->frontbuffer))) + break; + + intel_frontbuffer_put(front); + } while (1); + rcu_read_unlock(); + + return front; +} + struct intel_frontbuffer * intel_frontbuffer_get(struct drm_i915_gem_object *obj); @@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, struct intel_frontbuffer *new, unsigned int frontbuffer_bits); -void intel_frontbuffer_put(struct intel_frontbuffer *front); - #endif /* __INTEL_FRONTBUFFER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index e69fa34528df..0fdbd39f6641 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ 
-922,7 +922,7 @@ static void intel_hdcp_prop_work(struct work_struct *work) bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) { /* PORT E doesn't have HDCP, and PORT F is disabled */ - return INTEL_GEN(dev_priv) >= 9 && port < PORT_E; + return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E; } static int @@ -1821,23 +1821,6 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) } } -void intel_hdcp_transcoder_config(struct intel_connector *connector, - enum transcoder cpu_transcoder) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; - - if (!hdcp->shim) - return; - - if (INTEL_GEN(dev_priv) >= 12) { - mutex_lock(&hdcp->mutex); - hdcp->cpu_transcoder = cpu_transcoder; - hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); - mutex_unlock(&hdcp->mutex); - } -} - static inline int initialize_hdcp_port_data(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { @@ -1887,7 +1870,7 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) return false; return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || - IS_KABYLAKE(dev_priv)); + IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)); } void intel_hdcp_component_init(struct drm_i915_private *dev_priv) @@ -1959,8 +1942,10 @@ int intel_hdcp_init(struct intel_connector *connector, return 0; } -int intel_hdcp_enable(struct intel_connector *connector, u8 content_type) +int intel_hdcp_enable(struct intel_connector *connector, + enum transcoder cpu_transcoder, u8 content_type) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; int ret = -EINVAL; @@ -1972,6 +1957,11 @@ int intel_hdcp_enable(struct intel_connector *connector, u8 content_type) WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = content_type; + if (INTEL_GEN(dev_priv) >= 12) { + hdcp->cpu_transcoder = cpu_transcoder; + hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); + } + /* * Considering that HDCP2.2 is more secure than HDCP1.4, if the setup * is capable of HDCP2.2, it is preferred to use HDCP2.2.
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 41c1053d9e38..f3c3272e712a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -21,11 +21,10 @@ enum transcoder; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); -void intel_hdcp_transcoder_config(struct intel_connector *connector, - enum transcoder cpu_transcoder); int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); -int intel_hdcp_enable(struct intel_connector *connector, u8 content_type); +int intel_hdcp_enable(struct intel_connector *connector, + enum transcoder cpu_transcoder, u8 content_type); int intel_hdcp_disable(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); bool intel_hdcp_capable(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index b54ccbb5aad5..685589064d10 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -285,7 +285,7 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); int i; @@ -321,7 +321,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 val, *data = frame; int i; @@ -340,7 +340,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; i915_reg_t reg = TVIDEO_DIP_CTL(pipe); u32 val = I915_READ(reg); @@ -362,7 +362,7 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); int i; @@ -401,7 +401,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 val, *data = frame; int i; @@ -420,7 +420,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) @@ -438,7 +438,7 @@ static void 
vlv_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); int i; @@ -474,7 +474,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); u32 val, *data = frame; int i; @@ -493,7 +493,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) @@ -708,7 +708,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, { struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; int ret; @@ -804,7 +804,7 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder, ret = drm_hdmi_vendor_infoframe_from_display_mode(frame, conn_state->connector, - &crtc_state->base.adjusted_mode); + &crtc_state->hw.adjusted_mode); if (WARN_ON(ret)) return false; @@ -965,7 +965,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg; if ((crtc_state->infoframes.enable & @@ -990,7 +990,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg; if ((crtc_state->infoframes.enable & @@ -1027,7 +1027,7 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder, /* Enable default_phase whenever the display mode is suitably aligned */ if (gcp_default_phase_possible(crtc_state->pipe_bpp, - &crtc_state->base.adjusted_mode)) + &crtc_state->hw.adjusted_mode)) crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE; } @@ -1037,7 +1037,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); @@ -1096,7 +1096,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - 
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -1145,7 +1145,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -1736,9 +1736,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; u32 hdmi_val; intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); @@ -1829,7 +1829,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, tmp & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - pipe_config->base.adjusted_mode.flags |= flags; + pipe_config->hw.adjusted_mode.flags |= flags; if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) dotclock = pipe_config->port_clock * 2 / 3; @@ -1839,7 +1839,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; - pipe_config->base.adjusted_mode.crtc_clock = dotclock; + pipe_config->hw.adjusted_mode.crtc_clock = dotclock; pipe_config->lane_count = 4; @@ -1860,7 +1860,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); WARN_ON(!pipe_config->has_hdmi_sink); DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", @@ -1946,7 +1946,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); enum pipe pipe = crtc->pipe; u32 temp; @@ -2010,7 +2010,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); @@ -2210,12 +2210,12 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc) { struct drm_i915_private *dev_priv = - to_i915(crtc_state->base.crtc->dev); - struct drm_atomic_state *state = crtc_state->base.state; + 
to_i915(crtc_state->uapi.crtc->dev); + struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; struct drm_connector *connector; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; int i; if (HAS_GMCH(dev_priv)) @@ -2240,7 +2240,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, for_each_new_connector_in_state(state, connector, connector_state, i) { const struct drm_display_info *info = &connector->display_info; - if (connector_state->crtc != crtc_state->base.crtc) + if (connector_state->crtc != crtc_state->uapi.crtc) continue; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { @@ -2281,7 +2281,7 @@ static bool intel_hdmi_ycbcr420_config(struct drm_connector *connector, struct intel_crtc_state *config) { - struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc); if (!connector->ycbcr_420_allowed) { DRM_ERROR("Platform doesn't support YCBCR420 output\n"); @@ -2336,7 +2336,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) @@ -2378,7 +2378,7 @@ static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_s const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; /* * Our YCbCr output is always limited range. @@ -2406,7 +2406,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; struct intel_digital_connector_state *intel_conn_state = @@ -2451,8 +2451,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (ret) return ret; - /* Set user selected PAR to incoming mode's member */ - adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; + if (conn_state->picture_aspect_ratio) + adjusted_mode->picture_aspect_ratio = + conn_state->picture_aspect_ratio; pipe_config->lane_count = 4; @@ -2489,9 +2490,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return -EINVAL; } - intel_hdcp_transcoder_config(intel_hdmi->attached_connector, - pipe_config->cpu_transcoder); - return 0; } @@ -2626,6 +2624,12 @@ out: if (status != connector_status_connected) cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier); + /* + * Make sure the refs for power wells enabled during detect are + * dropped to avoid a new detect cycle triggered by HPD polling. 
+ */ + intel_display_power_flush_work(dev_priv); + return status; } @@ -2869,7 +2873,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_colorspace_property(connector); drm_connector_attach_content_type_property(connector); - connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) drm_object_attach_property(&connector->base, @@ -3128,20 +3131,29 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + struct i2c_adapter *ddc; enum port port = intel_encoder->port; struct cec_connector_info conn_info; DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n", intel_encoder->base.base.id, intel_encoder->base.name); + if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A)) + return; + if (WARN(intel_dig_port->max_lanes < 4, "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", intel_dig_port->max_lanes, intel_encoder->base.base.id, intel_encoder->base.name)) return; - drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); + intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); + ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); + + drm_connector_init_with_ddc(dev, connector, + &intel_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA, + ddc); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); connector->interlace_allowed = 1; @@ -3151,10 +3163,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) connector->ycbcr_420_allowed = true; - intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); - - if (WARN_ON(port == PORT_A)) - return; intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); if (HAS_DDI(dev_priv)) @@ -3277,11 +3285,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder->port = port; if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) - intel_encoder->crtc_mask = BIT(PIPE_C); + intel_encoder->pipe_mask = BIT(PIPE_C); else - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B); + intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); } else { - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; /* diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index f8f1308643a9..5145ff8b962b 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -189,7 +189,7 @@ void lspcon_ycbcr420_config(struct drm_connector *connector, { const struct drm_display_info *info = &connector->display_info; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; if (drm_mode_is_420_only(info, adjusted_mode) && connector->ycbcr_420_allowed) { @@ -475,7 +475,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct intel_lspcon *lspcon = &dig_port->lspcon; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; if (!lspcon->active) { DRM_ERROR("Writing infoframes while LSPCON disabled ?\n"); diff --git 
a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 13841d7c455b..10696bb99dcf 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -135,7 +135,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, else flags |= DRM_MODE_FLAG_PVSYNC; - pipe_config->base.adjusted_mode.flags |= flags; + pipe_config->hw.adjusted_mode.flags |= flags; if (INTEL_GEN(dev_priv) < 5) pipe_config->gmch_pfit.lvds_border_bits = @@ -148,7 +148,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; } - pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; + pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; } static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, @@ -230,8 +230,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum pipe pipe = crtc->pipe; u32 temp; @@ -392,8 +392,8 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, to_lvds_encoder(&intel_encoder->base); struct intel_connector *intel_connector = lvds_encoder->attached_connector; - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); unsigned int lvds_bpp; /* Should never happen!! */ @@ -899,12 +899,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = PORT_NONE; intel_encoder->cloneable = 0; - if (HAS_PCH_SPLIT(dev_priv)) - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); - else if (IS_GEN(dev_priv, 4)) - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B); + if (INTEL_GEN(dev_priv) < 4) + intel_encoder->pipe_mask = BIT(PIPE_B); else - intel_encoder->crtc_mask = BIT(PIPE_B); + intel_encoder->pipe_mask = ~0; drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 969ade623691..e59b4992ba1b 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -941,6 +941,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; + /* + * Indicate we handle monitor hotplug events ourselves so we do + * not need ACPI notifications for them. Disabling these avoids + * triggering the AML code doing the notification, which may be + * broken as Windows also seems to disable these.
+ */ + opregion->acpi->chpd = 1; } if (mboxes & MBOX_SWSCI) { diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 2360f19f9694..e40c3a0e2cd7 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -30,6 +30,7 @@ #include <drm/i915_drm.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_reg.h" @@ -278,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma) { enum pipe pipe = overlay->crtc->pipe; + struct intel_frontbuffer *from = NULL, *to = NULL; WARN_ON(overlay->old_vma); - intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL, - vma ? vma->obj->frontbuffer : NULL, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + if (overlay->vma) + from = intel_frontbuffer_get(overlay->vma->obj); + if (vma) + to = intel_frontbuffer_get(vma->obj); + + intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe)); + + if (to) + intel_frontbuffer_put(to); + if (from) + intel_frontbuffer_put(from); intel_frontbuffer_flip_prepare(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(pipe)); @@ -667,8 +677,8 @@ static void update_colorkey(struct intel_overlay *overlay, if (overlay->color_key_enabled) flags |= DST_KEY_ENABLE; - if (state->base.visible) - format = state->base.fb->format->format; + if (state->uapi.visible) + format = state->hw.fb->format->format; switch (format) { case DRM_FORMAT_C8: @@ -757,15 +767,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, atomic_inc(&dev_priv->gpu_error.pending_fb_pin); - i915_gem_object_lock(new_bo); vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, PIN_MAPPABLE); - i915_gem_object_unlock(new_bo); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out_pin_section; } - intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB); + i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB); if (!overlay->active) { u32 oconfig; @@ -1325,12 +1333,14 @@ err_put_bo: void intel_overlay_setup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; + struct intel_engine_cs *engine; int ret; if (!HAS_OVERLAY(dev_priv)) return; - if (!HAS_ENGINE(dev_priv, RCS0)) + engine = dev_priv->engine[RCS0]; + if (!engine || !engine->kernel_context) return; overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); @@ -1338,7 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) return; overlay->i915 = dev_priv; - overlay->context = dev_priv->engine[RCS0]->kernel_context; + overlay->context = engine->kernel_context; GEM_BUG_ON(!overlay->context); overlay->color_key = 0x0101fe; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index bc14e9c0285a..7b3ec6eb3382 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -178,7 +178,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, struct intel_crtc_state *pipe_config, int fitting_mode) { - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int x = 0, y = 0, width = 0, height = 0; /* Native modes don't need fitting */ @@ -300,7 +300,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target) static void i965_scale_aspect(struct intel_crtc_state *pipe_config, u32 *pfit_control) { - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + 
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_config->pipe_src_h; u32 scaled_height = pipe_config->pipe_src_w * @@ -321,7 +321,7 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config, u32 *pfit_control, u32 *pfit_pgm_ratios, u32 *border) { - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_config->pipe_src_h; u32 scaled_height = pipe_config->pipe_src_w * @@ -380,7 +380,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; /* Native modes don't need fitting */ if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && @@ -1047,7 +1047,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; u32 ctl, ctl2; ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); @@ -1077,7 +1077,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; u32 pwm_ctl, val; /* Controller 1 uses the utility pin. 
*/ @@ -1189,7 +1189,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; if (!panel->backlight.present) return; @@ -1840,13 +1840,22 @@ static int pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_panel *panel = &connector->panel; + const char *desc; int retval; - /* Get the PWM chip for backlight control */ - panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight"); + /* Get the right PWM chip for DSI backlight according to VBT */ + if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { + panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight"); + desc = "PMIC"; + } else { + panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight"); + desc = "SoC"; + } + if (IS_ERR(panel->backlight.pwm)) { - DRM_ERROR("Failed to own the pwm chip\n"); + DRM_ERROR("Failed to get the %s PWM chip\n", desc); panel->backlight.pwm = NULL; return -ENODEV; } @@ -1873,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, CRC_PMIC_PWM_PERIOD_NS); panel->backlight.enabled = panel->backlight.level != 0; + DRM_INFO("Using %s PWM for LCD backlight control\n", desc); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 6260a2082719..2746512f4466 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -309,13 +309,13 @@ retry: goto put_state; } - pipe_config->base.mode_changed = pipe_config->has_psr; + pipe_config->uapi.mode_changed = pipe_config->has_psr; pipe_config->crc_enabled = enable; if (IS_HASWELL(dev_priv) && - pipe_config->base.active && crtc->pipe == PIPE_A && + pipe_config->hw.active && crtc->pipe == PIPE_A && pipe_config->cpu_transcoder == TRANSCODER_EDP) - pipe_config->base.mode_changed = true; + pipe_config->uapi.mode_changed = true; ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 50f22abcd30e..16e9ff47d519 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -26,6 +26,7 @@ #include "display/intel_dp.h" #include "i915_drv.h" +#include "intel_atomic.h" #include "intel_display_types.h" #include "intel_psr.h" #include "intel_sprite.h" @@ -76,7 +77,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { /* Cannot enable DSC and PSR2 simultaneously */ - WARN_ON(crtc_state->dsc_params.compression_enable && + WARN_ON(crtc_state->dsc.compression_enable && crtc_state->has_psr2); switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { @@ -401,7 +402,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) /* Enable ALPM at sink for psr2 */ if (dev_priv->psr.psr2_enabled) { drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, - DP_ALPM_ENABLE); + DP_ALPM_ENABLE | + DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); + dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; } else { if (dev_priv->psr.link_standby) @@ -536,11 +539,11 @@ transcoder_has_psr2(struct 
drm_i915_private *dev_priv, enum transcoder trans) static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) { - if (!cstate || !cstate->base.active) + if (!cstate || !cstate->hw.active) return 0; return DIV_ROUND_UP(1000 * 1000, - drm_mode_vrefresh(&cstate->base.adjusted_mode)); + drm_mode_vrefresh(&cstate->hw.adjusted_mode)); } static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, @@ -605,9 +608,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay; - int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay; - int psr_max_h = 0, psr_max_v = 0; + int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay; + int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; + int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; if (!dev_priv->psr.sink_psr2_support) return false; @@ -623,7 +626,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * resolution requires DSC to be enabled, priority is given to DSC * over PSR2. */ - if (crtc_state->dsc_params.compression_enable) { + if (crtc_state->dsc.compression_enable) { DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n"); return false; } @@ -631,12 +634,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (INTEL_GEN(dev_priv) >= 12) { psr_max_h = 5120; psr_max_v = 3200; + max_bpp = 30; } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { psr_max_h = 4096; psr_max_v = 2304; + max_bpp = 24; } else if (IS_GEN(dev_priv, 9)) { psr_max_h = 3640; psr_max_v = 2304; + max_bpp = 24; } if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) { @@ -646,6 +652,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (crtc_state->pipe_bpp > max_bpp) { + DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n", + crtc_state->pipe_bpp, max_bpp); + return false; + } + /* * HW sends SU blocks of size four scan lines, which means the starting * X coordinate and Y granularity requirements will always be met. 
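
intel_get_frame_time_us() above, used for the DC3CO exit delay, is just a rounded-up division of one second by the refresh rate. A quick stand-alone check of that arithmetic with assumed refresh rates:

#include <stdio.h>

/* Same rounding convention as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	static const unsigned int rates[] = { 30, 60, 90, 120 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		/* 60 Hz -> 16667 us, 120 Hz -> 8334 us */
		printf("%3u Hz -> %u us per frame\n",
		       rates[i], DIV_ROUND_UP(1000 * 1000, rates[i]));
	}
	return 0;
}
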
We @@ -672,7 +684,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; int psr_setup_time; if (!CAN_PSR(dev_priv)) @@ -740,25 +752,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp) dev_priv->psr.active = true; } -static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - static const i915_reg_t regs[] = { - [TRANSCODER_A] = CHICKEN_TRANS_A, - [TRANSCODER_B] = CHICKEN_TRANS_B, - [TRANSCODER_C] = CHICKEN_TRANS_C, - [TRANSCODER_EDP] = CHICKEN_TRANS_EDP, - }; - - WARN_ON(INTEL_GEN(dev_priv) < 9); - - if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) || - !regs[cpu_transcoder].reg)) - cpu_transcoder = TRANSCODER_A; - - return regs[cpu_transcoder]; -} - static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -774,8 +767,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))) { - i915_reg_t reg = gen9_chicken_trans_reg(dev_priv, - cpu_transcoder); + i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); u32 chicken = I915_READ(reg); chicken |= PSR2_VSC_ENABLE_PROG_HEADER | @@ -812,7 +804,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; - dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state); dev_priv->psr.transcoder = crtc_state->cpu_transcoder; @@ -944,6 +936,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + if (dev_priv->psr.psr2_enabled) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); + dev_priv->psr.enabled = false; } @@ -1059,7 +1054,7 @@ unlock: int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, u32 *out_value) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) @@ -1116,7 +1111,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) struct drm_device *dev = &dev_priv->drm; struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; - struct drm_crtc *crtc; + struct intel_crtc *crtc; int err; state = drm_atomic_state_alloc(dev); @@ -1127,21 +1122,18 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) state->acquire_ctx = &ctx; retry: - drm_for_each_crtc(crtc, dev) { - struct drm_crtc_state *crtc_state; - struct intel_crtc_state *intel_crtc_state; + for_each_intel_crtc(dev, crtc) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_crtc_state(state, crtc); - crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { err = PTR_ERR(crtc_state); goto error; } - intel_crtc_state = to_intel_crtc_state(crtc_state); - - if (crtc_state->active && intel_crtc_state->has_psr) { + if 
(crtc_state->hw.active && crtc_state->has_psr) { /* Mark mode as changed to trigger a pipe->update() */ - crtc_state->mode_changed = true; + crtc_state->uapi.mode_changed = true; break; } } @@ -1399,11 +1391,80 @@ void intel_psr_init(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->psr.lock); } -void intel_psr_short_pulse(struct intel_dp *intel_dp) +static int psr_get_status_and_error_status(struct intel_dp *intel_dp, + u8 *status, u8 *error_status) +{ + struct drm_dp_aux *aux = &intel_dp->aux; + int ret; + + ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); + if (ret != 1) + return ret; + + ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); + if (ret != 1) + return ret; + + *status = *status & DP_PSR_SINK_STATE_MASK; + + return 0; +} + +static void psr_alpm_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_dp_aux *aux = &intel_dp->aux; struct i915_psr *psr = &dev_priv->psr; u8 val; + int r; + + if (!psr->psr2_enabled) + return; + + r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); + if (r != 1) { + DRM_ERROR("Error reading ALPM status\n"); + return; + } + + if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { + intel_psr_disable_locked(intel_dp); + psr->sink_not_reliable = true; + DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n"); + + /* Clearing error */ + drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); + } +} + +static void psr_capability_changed_check(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct i915_psr *psr = &dev_priv->psr; + u8 val; + int r; + + r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); + if (r != 1) { + DRM_ERROR("Error reading DP_PSR_ESI\n"); + return; + } + + if (val & DP_PSR_CAPS_CHANGE) { + intel_psr_disable_locked(intel_dp); + psr->sink_not_reliable = true; + DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n"); + + /* Clearing it */ + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); + } +} + +void intel_psr_short_pulse(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct i915_psr *psr = &dev_priv->psr; + u8 status, error_status; const u8 errors = DP_PSR_RFB_STORAGE_ERROR | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | DP_PSR_LINK_CRC_ERROR; @@ -1416,38 +1477,34 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) if (!psr->enabled || psr->dp != intel_dp) goto exit; - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) { - DRM_ERROR("PSR_STATUS dpcd read failed\n"); + if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { + DRM_ERROR("Error reading PSR status or error status\n"); goto exit; } - if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { - DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); + if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; } - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { - DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n"); - goto exit; - } - - if (val & DP_PSR_RFB_STORAGE_ERROR) + if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) + DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); + if (error_status & DP_PSR_RFB_STORAGE_ERROR) DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); - if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) + if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); - 
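
The short-pulse rework above funnels both DPCD reads through one helper and tears PSR down once for any fatal condition; the per-bit checks that follow only pick the log message. A stand-alone sketch of that classification shape (the bit values are invented placeholders, not the real DPCD definitions):

#include <stdbool.h>
#include <stdio.h>

#define ERR_RFB_STORAGE (1u << 0)	/* placeholder bits */
#define ERR_VSC_SDP     (1u << 1)
#define ERR_LINK_CRC    (1u << 2)
#define KNOWN_ERRORS    (ERR_RFB_STORAGE | ERR_VSC_SDP | ERR_LINK_CRC)

static void handle_short_pulse(bool internal_error, unsigned int error_status)
{
	/* Disable exactly once, whichever failure source fired. */
	if (internal_error || (error_status & KNOWN_ERRORS))
		printf("disabling PSR, sink not reliable\n");

	/* Everything below is logging only. */
	if (error_status & ERR_RFB_STORAGE)
		printf("RFB storage error\n");
	if (error_status & ERR_VSC_SDP)
		printf("VSC SDP uncorrectable error\n");
	if (error_status & ERR_LINK_CRC)
		printf("link CRC error\n");
	if (error_status & ~KNOWN_ERRORS)
		printf("unhandled error bits %#x\n", error_status & ~KNOWN_ERRORS);
}

int main(void)
{
	handle_short_pulse(false, ERR_LINK_CRC | (1u << 5));
	return 0;
}
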
if (val & DP_PSR_LINK_CRC_ERROR) - DRM_ERROR("PSR Link CRC error, disabling PSR\n"); + if (error_status & DP_PSR_LINK_CRC_ERROR) + DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n"); - if (val & ~errors) + if (error_status & ~errors) DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", - val & ~errors); - if (val & errors) { - intel_psr_disable_locked(intel_dp); - psr->sink_not_reliable = true; - } + error_status & ~errors); /* clear status register */ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); + + psr_alpm_check(intel_dp); + psr_capability_changed_check(intel_dp); + exit: mutex_unlock(&psr->lock); } diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 47f5d87a938a..8758ee2a4442 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1087,7 +1087,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, { struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; int ret; if (!crtc_state->has_hdmi_sink) @@ -1276,8 +1276,8 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, to_intel_sdvo_connector_state(conn_state); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(conn_state->connector); - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - struct drm_display_mode *mode = &pipe_config->base.mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_display_mode *mode = &pipe_config->hw.mode; DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); pipe_config->pipe_bpp = 8*3; @@ -1349,9 +1349,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, if (IS_TV(intel_sdvo_connector)) i9xx_adjust_sdvo_tv_clock(pipe_config); - /* Set user selected PAR to incoming mode's member */ - if (intel_sdvo_connector->is_hdmi) - adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; + if (conn_state->picture_aspect_ratio) + adjusted_mode->picture_aspect_ratio = + conn_state->picture_aspect_ratio; if (!intel_sdvo_compute_avi_infoframe(intel_sdvo, pipe_config, conn_state)) { @@ -1429,13 +1429,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state); const struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(conn_state->connector); - const struct drm_display_mode *mode = &crtc_state->base.mode; + const struct drm_display_mode *mode = &crtc_state->hw.mode; struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; @@ -1629,7 +1629,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, flags |= DRM_MODE_FLAG_NVSYNC; } - pipe_config->base.adjusted_mode.flags |= flags; + pipe_config->hw.adjusted_mode.flags |= flags; /* * pixel multiplier readout is 
tricky: Only on i915g/gm it is stored in @@ -1649,7 +1649,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; - pipe_config->base.adjusted_mode.crtc_clock = dotclock; + pipe_config->hw.adjusted_mode.crtc_clock = dotclock; /* Cross check the port pixel multiplier with the sdvo encoder state. */ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, @@ -1701,7 +1701,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state) { const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; u8 *eld = connector->eld; @@ -1723,7 +1723,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; if (old_crtc_state->has_audio) @@ -1785,7 +1785,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); u32 temp; bool input1, input2; int i; @@ -2654,7 +2654,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, intel_attach_broadcast_rgb_property(&connector->base.base); } intel_attach_aspect_ratio_property(&connector->base.base); - connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; } static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void) @@ -2921,7 +2920,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) bytes[0], bytes[1]); return false; } - intel_sdvo->base.crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + intel_sdvo->base.pipe_mask = ~0; return true; } diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 5ae12ab3c5b7..3f7b8f2ff671 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -81,9 +81,9 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, */ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); @@ -120,7 +120,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; - trace_i915_pipe_update_start(crtc); + trace_intel_pipe_update_start(crtc); for (;;) { /* @@ -173,7 +173,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) crtc->debug.start_vbl_time = ktime_get(); crtc->debug.start_vbl_count = 
intel_crtc_get_vblank_counter(crtc); - trace_i915_pipe_update_vblank_evaded(crtc); + trace_intel_pipe_update_vblank_evaded(crtc); return; irq_disable: @@ -190,27 +190,28 @@ irq_disable: */ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; int scanline_end = intel_get_crtc_scanline(crtc); u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); ktime_t end_vbl_time = ktime_get(); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); + trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end); /* We're still in the vblank-evade critical section, this can't race. * Would be slightly nice to just grab the vblank count and arm the * event outside of the critical section - the spinlock might spin for a * while ... */ - if (new_crtc_state->base.event) { + if (new_crtc_state->uapi.event) { WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0); spin_lock(&crtc->base.dev->event_lock); - drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event); + drm_crtc_arm_vblank_event(&crtc->base, + new_crtc_state->uapi.event); spin_unlock(&crtc->base.dev->event_lock); - new_crtc_state->base.event = NULL; + new_crtc_state->uapi.event = NULL; } local_irq_enable(); @@ -239,9 +240,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) int intel_plane_check_stride(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; u32 stride, max_stride; /* @@ -251,7 +252,7 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state) * kick in due the plane being invisible. */ if (intel_plane_can_remap(plane_state) && - !plane_state->base.visible) + !plane_state->uapi.visible) return 0; /* FIXME other color planes? */ @@ -271,10 +272,10 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state) int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_rect *src = &plane_state->base.src; + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_rect *src = &plane_state->uapi.src; u32 src_x, src_y, src_w, src_h, hsub, vsub; - bool rotated = drm_rotation_90_or_270(plane_state->base.rotation); + bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); /* * Hardware doesn't handle subpixel coordinates. 
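
The intel_pipe_update_start()/intel_pipe_update_end() changes above keep the vblank-evade critical section intact; its window comes from converting a fixed microsecond budget into scanlines. A stand-alone version of that conversion with assumed 1080p-ish timings (the formula mirrors intel_usecs_to_scanlines(); the numbers and the 100 us budget are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int crtc_clock = 148500;	/* kHz, assumed */
	unsigned int htotal = 2200;		/* pixels per line, assumed */
	unsigned int vblank_start = 1080;	/* first vblank scanline */
	unsigned int usecs = 100;		/* evasion budget, assumed */

	/* scanlines = usecs * clock(kHz) / (1000 * htotal), rounded up */
	unsigned int scanlines = DIV_ROUND_UP(usecs * crtc_clock,
					      1000 * htotal);

	/* Updates must land inside this window to avoid tearing. */
	printf("evade window: scanlines %u..%u\n",
	       vblank_start - scanlines, vblank_start);
	return 0;
}
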
@@ -322,6 +323,55 @@ bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) icl_hdr_plane_mask() & BIT(plane_id); } +static void +skl_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + if (fb->format->cpp[0] == 8) { + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + *num = 10; + *den = 8; + } else { + *num = 9; + *den = 8; + } + } else { + *num = 1; + *den = 1; + } +} + +static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); + unsigned int pixel_rate = crtc_state->pixel_rate; + unsigned int src_w, src_h, dst_w, dst_h; + unsigned int num, den; + + skl_plane_ratio(crtc_state, plane_state, &num, &den); + + /* two pixels per clock on glk+ */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + den *= 2; + + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + dst_w = drm_rect_width(&plane_state->uapi.dst); + dst_h = drm_rect_height(&plane_state->uapi.dst); + + /* Downscaling limits the maximum pixel rate */ + dst_w = min(src_w, dst_w); + dst_h = min(src_h, dst_h); + + return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h), + mul_u32_u32(den, dst_w * dst_h)); +} + static unsigned int skl_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -346,28 +396,28 @@ skl_program_scaler(struct intel_plane *plane, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler = &crtc_state->scaler_state.scalers[scaler_id]; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - u32 crtc_w = drm_rect_width(&plane_state->base.dst); - u32 crtc_h = drm_rect_height(&plane_state->base.dst); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); + u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u16 y_hphase, uv_rgb_hphase; u16 y_vphase, uv_rgb_vphase; int hscale, vscale; - hscale = drm_rect_calc_hscale(&plane_state->base.src, - &plane_state->base.dst, + hscale = drm_rect_calc_hscale(&plane_state->uapi.src, + &plane_state->uapi.dst, 0, INT_MAX); - vscale = drm_rect_calc_vscale(&plane_state->base.src, - &plane_state->base.dst, + vscale = drm_rect_calc_vscale(&plane_state->uapi.src, + &plane_state->uapi.dst, 0, INT_MAX); /* TODO: handle sub-pixel coordinates */ - if (drm_format_info_is_yuv_semiplanar(fb->format) && + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && !icl_is_hdr_plane(dev_priv, plane->id)) { y_hphase = skl_scaler_calc_phase(1, hscale, false); y_vphase = skl_scaler_calc_phase(1, vscale, false); @@ -492,10 +542,10 @@ icl_program_input_csc(struct intel_plane *plane, }; const u16 *csc; - if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - csc = input_csc_matrix[plane_state->base.color_encoding]; + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + csc = 
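
skl_plane_min_cdclk() above scales the pipe pixel rate by the format ratio and by the source/destination area ratio, with the denominator doubled on glk+ since those pipes process two pixels per clock. A worked stand-alone example with assumed numbers (a 2x2 downscale quadruples the area ratio, the dual-pixel factor halves it back):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pixel_rate = 533250;	/* kHz, assumed */
	uint64_t num = 1, den = 1;	/* non-fp16 format -> 1:1 ratio */

	den *= 2;			/* glk+: two pixels per clock */

	uint64_t src_w = 3840, src_h = 2160;
	uint64_t dst_w = 1920, dst_h = 1080;	/* 2x2 downscale */

	uint64_t d = den * dst_w * dst_h;
	uint64_t min_cdclk = (pixel_rate * num * src_w * src_h + d - 1) / d;

	/* Prints 1066500: the downscale doubles the cdclk requirement. */
	printf("min cdclk: %llu kHz\n", (unsigned long long)min_cdclk);
	return 0;
}
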
input_csc_matrix[plane_state->hw.color_encoding]; else - csc = input_csc_matrix_lr[plane_state->base.color_encoding]; + csc = input_csc_matrix_lr[plane_state->hw.color_encoding]; I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | GOFF(csc[1])); @@ -509,7 +559,7 @@ icl_program_input_csc(struct intel_plane *plane, I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), PREOFF_YUV_TO_RGB_HI); - if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0); else I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), @@ -525,7 +575,7 @@ static void skl_program_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - int color_plane, bool slave, u32 plane_ctl) + int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; @@ -533,19 +583,20 @@ skl_program_plane(struct intel_plane *plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr = plane_state->color_plane[color_plane].offset; u32 stride = skl_plane_stride(plane_state, color_plane); + u32 aux_dist = plane_state->color_plane[1].offset - surf_addr; u32 aux_stride = skl_plane_stride(plane_state, 1); - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; u32 x = plane_state->color_plane[color_plane].x; u32 y = plane_state->color_plane[color_plane].y; - u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; - u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; - struct intel_plane *linked = plane_state->planar_linked_plane; - const struct drm_framebuffer *fb = plane_state->base.fb; - u8 alpha = plane_state->base.alpha >> 8; + u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + const struct drm_framebuffer *fb = plane_state->hw.fb; + u8 alpha = plane_state->hw.alpha >> 8; u32 plane_color_ctl = 0; unsigned long irqflags; u32 keymsk, keymax; + u32 plane_ctl = plane_state->ctl; plane_ctl |= skl_plane_ctl_crtc(crtc_state); @@ -574,29 +625,13 @@ skl_program_plane(struct intel_plane *plane, I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); - I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), - (plane_state->color_plane[1].offset - surf_addr) | aux_stride); - if (icl_is_hdr_plane(dev_priv, plane_id)) { - u32 cus_ctl = 0; - - if (linked) { - /* Enable and use MPEG-2 chroma siting */ - cus_ctl = PLANE_CUS_ENABLE | - PLANE_CUS_HPHASE_0 | - PLANE_CUS_VPHASE_SIGN_NEGATIVE | - PLANE_CUS_VPHASE_0_25; - - if (linked->id == PLANE_SPRITE5) - cus_ctl |= PLANE_CUS_PLANE_7; - else if (linked->id == PLANE_SPRITE4) - cus_ctl |= PLANE_CUS_PLANE_6; - else - MISSING_CASE(linked->id); - } + if (INTEL_GEN(dev_priv) < 12) + aux_dist |= aux_stride; + I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist); - I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl); - } + if (icl_is_hdr_plane(dev_priv, plane_id)) + I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); @@ -626,7 +661,7 @@ skl_program_plane(struct intel_plane *plane, 
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + surf_addr); - if (!slave && plane_state->scaler_id >= 0) + if (plane_state->scaler_id >= 0) skl_program_scaler(plane, crtc_state, plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -639,24 +674,12 @@ skl_update_plane(struct intel_plane *plane, { int color_plane = 0; - if (plane_state->planar_linked_plane) { - /* Program the UV plane */ + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + /* Program the UV plane on planar master */ color_plane = 1; - } - skl_program_plane(plane, crtc_state, plane_state, - color_plane, false, plane_state->ctl); + skl_program_plane(plane, crtc_state, plane_state, color_plane); } - -static void -icl_update_slave(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - skl_program_plane(plane, crtc_state, plane_state, 0, true, - plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE); -} - static void skl_disable_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) @@ -716,9 +739,9 @@ static void i9xx_plane_linear_gamma(u16 gamma[8]) static void chv_update_csc(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id plane_id = plane->id; /* * |r| | c0 c1 c2 | |cr| @@ -744,7 +767,7 @@ chv_update_csc(const struct intel_plane_state *plane_state) 0, 4096, 7601, }, }; - const s16 *csc = csc_matrix[plane_state->base.color_encoding]; + const s16 *csc = csc_matrix[plane_state->hw.color_encoding]; /* Seems RGB data bypasses the CSC always */ if (!fb->format->is_yuv) @@ -775,15 +798,15 @@ chv_update_csc(const struct intel_plane_state *plane_state) static void vlv_update_clrc(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; int contrast, brightness, sh_scale, sh_sin, sh_cos; if (fb->format->is_yuv && - plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) { + plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) { /* * Expand limited range to full range: * Contrast is applied first and is used to expand Y range. @@ -811,6 +834,85 @@ vlv_update_clrc(const struct intel_plane_state *plane_state) SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); } +static void +vlv_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + /* + * VLV bspec only considers cases where all three planes are + * enabled, and cases where the primary and one sprite is enabled. + * Let's assume the case with just two sprites enabled also + * maps to the latter case. 
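
Earlier in this hunk, skl_update_plane() absorbs the old icl_update_slave() vfunc: whether a plane programs the Y or the UV surface is now derived from the plane state itself. A minimal model of that selection (field names simplified, not the driver's own):

#include <stdbool.h>
#include <stdio.h>

struct planar_state {
	bool linked;	/* paired with another plane for NV12-style scanout */
	bool slave;	/* this plane carries the Y data for its master */
};

/* Only the planar master programs color plane 1 (the UV surface). */
static int color_plane_to_program(const struct planar_state *s)
{
	return (s->linked && !s->slave) ? 1 : 0;
}

int main(void)
{
	struct planar_state master = { .linked = true, .slave = false };
	struct planar_state slave = { .linked = true, .slave = true };

	printf("master -> color plane %d\n", color_plane_to_program(&master));
	printf("slave  -> color plane %d\n", color_plane_to_program(&slave));
	return 0;
}
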
+ */ + if (hweight8(active_planes) == 3) { + switch (cpp) { + case 8: + *num = 11; + *den = 8; + break; + case 4: + *num = 18; + *den = 16; + break; + default: + *num = 1; + *den = 1; + break; + } + } else if (hweight8(active_planes) == 2) { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + case 4: + *num = 17; + *den = 16; + break; + default: + *num = 1; + *den = 1; + break; + } + } else { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } +} + +int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. + */ + pixel_rate = crtc_state->pixel_rate; + + vlv_plane_ratio(crtc_state, plane_state, &num, &den); + + return DIV_ROUND_UP(pixel_rate * num, den); +} + static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 sprctl = 0; @@ -824,8 +926,8 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprctl; @@ -844,6 +946,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_VYUY: sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY; break; + case DRM_FORMAT_C8: + sprctl |= SP_FORMAT_8BPP; + break; case DRM_FORMAT_RGB565: sprctl |= SP_FORMAT_BGR565; break; @@ -859,6 +964,12 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_ABGR2101010: sprctl |= SP_FORMAT_RGBA1010102; break; + case DRM_FORMAT_XRGB2101010: + sprctl |= SP_FORMAT_BGRX1010102; + break; + case DRM_FORMAT_ARGB2101010: + sprctl |= SP_FORMAT_BGRA1010102; + break; case DRM_FORMAT_XBGR8888: sprctl |= SP_FORMAT_RGBX8888; break; @@ -870,7 +981,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, return 0; } - if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) + if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SP_YUV_FORMAT_BT709; if (fb->modifier == I915_FORMAT_MOD_X_TILED) @@ -890,9 +1001,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, static void vlv_update_gamma(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; u16 gamma[8]; @@ -924,10 +1035,10 @@ vlv_update_plane(struct intel_plane *plane, u32 sprsurf_offset = plane_state->color_plane[0].offset; u32 linear_offset; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - u32 crtc_w = 
drm_rect_width(&plane_state->base.dst); - u32 crtc_h = drm_rect_height(&plane_state->base.dst); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); + u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u32 x = plane_state->color_plane[0].x; u32 y = plane_state->color_plane[0].y; unsigned long irqflags; @@ -1017,6 +1128,164 @@ vlv_plane_get_hw_state(struct intel_plane *plane, return ret; } +static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + if (hweight8(active_planes) == 2) { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + case 4: + *num = 17; + *den = 16; + break; + default: + *num = 1; + *den = 1; + break; + } + } else { + switch (cpp) { + case 8: + *num = 9; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } +} + +static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + switch (cpp) { + case 8: + *num = 12; + *den = 8; + break; + case 4: + *num = 19; + *den = 16; + break; + case 2: + *num = 33; + *den = 32; + break; + default: + *num = 1; + *den = 1; + break; + } +} + +int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. + */ + pixel_rate = crtc_state->pixel_rate; + + ivb_plane_ratio(crtc_state, plane_state, &num, &den); + + return DIV_ROUND_UP(pixel_rate * num, den); +} + +static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int src_w, dst_w, pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. 
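
The vlv/ivb ratio tables above encode the cdclk guard-band as a num/den fraction keyed on bytes-per-pixel and active plane count, so the min-cdclk helpers reduce to a single multiply-divide. A worked example using the 17/16 entry (two planes, 32bpp) and an assumed pixel rate:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixel_rate = 200000;	/* kHz, assumed */
	unsigned int num = 17, den = 16;	/* two planes, cpp == 4 */

	/* 200 MHz of pixels needs at least 212.5 MHz of cdclk. */
	printf("min cdclk: %u kHz\n", DIV_ROUND_UP(pixel_rate * num, den));
	return 0;
}
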
+ */ + pixel_rate = crtc_state->pixel_rate; + + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + dst_w = drm_rect_width(&plane_state->uapi.dst); + + if (src_w != dst_w) + ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den); + else + ivb_plane_ratio(crtc_state, plane_state, &num, &den); + + /* Horizontal downscaling limits the maximum pixel rate */ + dst_w = min(src_w, dst_w); + + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w), + den * dst_w); +} + +static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + if (hweight8(active_planes) == 2) { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } else { + switch (cpp) { + case 8: + *num = 9; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } +} + +int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate = crtc_state->pixel_rate; + unsigned int num, den; + + hsw_plane_ratio(crtc_state, plane_state, &num, &den); + + return DIV_ROUND_UP(pixel_rate * num, den); +} + static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 sprctl = 0; @@ -1030,13 +1299,23 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) return sprctl; } +static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + return fb->format->cpp[0] == 8 && + (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)); +} + static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 sprctl; @@ -1052,6 +1331,18 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XRGB8888: sprctl |= SPRITE_FORMAT_RGBX888; break; + case DRM_FORMAT_XBGR2101010: + sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX; + break; + case DRM_FORMAT_XRGB2101010: + sprctl |= SPRITE_FORMAT_RGBX101010; + break; + case DRM_FORMAT_XBGR16161616F: + sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX; + break; + case DRM_FORMAT_XRGB16161616F: + sprctl |= SPRITE_FORMAT_RGBX161616; + break; case DRM_FORMAT_YUYV: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; break; @@ -1069,12 +1360,13 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return 0; } - sprctl |= SPRITE_INT_GAMMA_DISABLE; + if (!ivb_need_sprite_gamma(plane_state)) + sprctl |= SPRITE_INT_GAMMA_DISABLE; - if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) + if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; - if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + if (plane_state->hw.color_range 
== DRM_COLOR_YCBCR_FULL_RANGE) sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE; if (fb->modifier == I915_FORMAT_MOD_X_TILED) @@ -1091,12 +1383,26 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } -static void ivb_sprite_linear_gamma(u16 gamma[18]) +static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, + u16 gamma[18]) { - int i; + int scale, i; - for (i = 0; i < 17; i++) - gamma[i] = (i << 10) / 16; + /* + * WaFP16GammaEnabling:ivb,hsw + * "Workaround : When using the 64-bit format, the sprite output + * on each color channel has one quarter amplitude. It can be + * brought up to full amplitude by using sprite internal gamma + * correction, pipe gamma correction, or pipe color space + * conversion to multiply the sprite output by four." + */ + scale = 4; + + for (i = 0; i < 16; i++) + gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1); + + gamma[i] = min((scale * i << 10) / 16, 1 << 10); + i++; gamma[i] = 3 << 10; i++; @@ -1104,13 +1410,16 @@ static void ivb_sprite_linear_gamma(u16 gamma[18]) static void ivb_update_gamma(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; u16 gamma[18]; int i; - ivb_sprite_linear_gamma(gamma); + if (!ivb_need_sprite_gamma(plane_state)) + return; + + ivb_sprite_linear_gamma(plane_state, gamma); /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) @@ -1140,14 +1449,14 @@ ivb_update_plane(struct intel_plane *plane, u32 sprsurf_offset = plane_state->color_plane[0].offset; u32 linear_offset; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - u32 crtc_w = drm_rect_width(&plane_state->base.dst); - u32 crtc_h = drm_rect_height(&plane_state->base.dst); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); + u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u32 x = plane_state->color_plane[0].x; u32 y = plane_state->color_plane[0].y; - u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; - u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; u32 sprctl, sprscale = 0; unsigned long irqflags; @@ -1243,6 +1552,53 @@ ivb_plane_get_hw_state(struct intel_plane *plane, return ret; } +static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int hscale, pixel_rate; + unsigned int limit, decimate; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. 
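
The WaFP16GammaEnabling ramp above compensates for fp16 sprite output arriving at one quarter amplitude: a linear ramp with a 4x slope, clamped so the first 16 ten-bit entries never overflow, with the two extended entries reaching 1.0 and 3.0. The same table reproduced stand-alone to show the resulting values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int gamma[18];
	int scale = 4, i;

	/* Entries 0..15: 4x-slope ramp, clamped just below 1.0 (10 bit). */
	for (i = 0; i < 16; i++)
		gamma[i] = MIN((scale * i << 10) / 16, (1 << 10) - 1);

	/* Entry 16 may reach exactly 1.0; entry 17 extends to 3.0. */
	gamma[i] = MIN((scale * i << 10) / 16, 1 << 10);
	i++;
	gamma[i] = 3 << 10;

	for (i = 0; i < 18; i++)
		printf("gamma[%2d] = %u\n", i, gamma[i]);
	return 0;
}
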
+ */ + pixel_rate = crtc_state->pixel_rate; + + /* Horizontal downscaling limits the maximum pixel rate */ + hscale = drm_rect_calc_hscale(&plane_state->uapi.src, + &plane_state->uapi.dst, + 0, INT_MAX); + if (hscale < 0x10000) + return pixel_rate; + + /* Decimation steps at 2x,4x,8x,16x */ + decimate = ilog2(hscale >> 16); + hscale >>= decimate; + + /* Starting limit is 90% of cdclk */ + limit = 9; + + /* -10% per decimation step */ + limit -= decimate; + + /* -10% for RGB */ + if (fb->format->cpp[0] >= 4) + limit--; + + /* + * We should also do -10% if sprite scaling is enabled + * on the other pipe, but we can't really check for that, + * so we ignore it. + */ + + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale), + limit << 16); +} + static unsigned int g4x_sprite_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -1268,9 +1624,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 dvscntr; @@ -1286,6 +1642,18 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XRGB8888: dvscntr |= DVS_FORMAT_RGBX888; break; + case DRM_FORMAT_XBGR2101010: + dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR; + break; + case DRM_FORMAT_XRGB2101010: + dvscntr |= DVS_FORMAT_RGBX101010; + break; + case DRM_FORMAT_XBGR16161616F: + dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR; + break; + case DRM_FORMAT_XRGB16161616F: + dvscntr |= DVS_FORMAT_RGBX161616; + break; case DRM_FORMAT_YUYV: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; break; @@ -1303,10 +1671,10 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, return 0; } - if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) + if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) dvscntr |= DVS_YUV_FORMAT_BT709; - if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE; if (fb->modifier == I915_FORMAT_MOD_X_TILED) @@ -1325,9 +1693,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, static void g4x_update_gamma(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; u16 gamma[8]; int i; @@ -1357,9 +1725,9 @@ static void ilk_sprite_linear_gamma(u16 gamma[17]) static void ilk_update_gamma(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum pipe pipe = plane->pipe; u16
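
g4x_sprite_min_cdclk() above models the sprite downscaler's throughput budget: the starting limit is 90% of cdclk, and each 2x decimation step, like an RGB format, costs another 10%. A stand-alone walk-through of a 3x horizontal downscale with an assumed pixel rate:

#include <stdint.h>
#include <stdio.h>

/* Integer log2, standing in for the kernel's ilog2(). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t pixel_rate = 148500;	/* kHz, assumed */
	uint32_t hscale = 0x30000;	/* 3.0 in 16.16 fixed point */
	unsigned int limit = 9;		/* start at 90% of cdclk */

	unsigned int decimate = ilog2_u32(hscale >> 16);	/* one step */
	hscale >>= decimate;		/* residual scale: 1.5 */

	limit -= decimate;		/* -10% per decimation step */
	limit--;			/* -10% for RGB (cpp >= 4) */

	uint64_t d = (uint64_t)limit << 16;
	uint64_t min_cdclk = (pixel_rate * 10 * hscale + d - 1) / d;

	/* 148500 kHz at 1.5x residual scale against a 70% limit. */
	printf("min cdclk: %llu kHz\n", (unsigned long long)min_cdclk);
	return 0;
}
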
gamma[17]; int i; @@ -1393,14 +1761,14 @@ g4x_update_plane(struct intel_plane *plane, u32 dvssurf_offset = plane_state->color_plane[0].offset; u32 linear_offset; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - u32 crtc_w = drm_rect_width(&plane_state->base.dst); - u32 crtc_h = drm_rect_height(&plane_state->base.dst); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); + u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); u32 x = plane_state->color_plane[0].x; u32 y = plane_state->color_plane[0].y; - u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; - u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; + u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; u32 dvscntr, dvsscale = 0; unsigned long irqflags; @@ -1499,6 +1867,11 @@ static bool intel_fb_scalable(const struct drm_framebuffer *fb) switch (fb->format->format) { case DRM_FORMAT_C8: return false; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + return INTEL_GEN(to_i915(fb->dev)) >= 11; default: return true; } @@ -1508,12 +1881,12 @@ static int g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; - const struct drm_rect *src = &plane_state->base.src; - const struct drm_rect *dst = &plane_state->base.dst; + const struct drm_framebuffer *fb = plane_state->hw.fb; + const struct drm_rect *src = &plane_state->uapi.src; + const struct drm_rect *dst = &plane_state->uapi.dst; int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; unsigned int stride = plane_state->color_plane[0].stride; unsigned int cpp = fb->format->cpp[0]; unsigned int width_bytes; @@ -1569,13 +1942,13 @@ static int g4x_sprite_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); int min_scale = DRM_PLANE_HELPER_NO_SCALING; int max_scale = DRM_PLANE_HELPER_NO_SCALING; int ret; - if (intel_fb_scalable(plane_state->base.fb)) { + if (intel_fb_scalable(plane_state->hw.fb)) { if (INTEL_GEN(dev_priv) < 7) { min_scale = 1; max_scale = 16 << 16; @@ -1585,8 +1958,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, } } - ret = drm_atomic_helper_check_plane_state(&plane_state->base, - &crtc_state->base, + ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, + &crtc_state->uapi, min_scale, max_scale, true, true); if (ret) @@ -1596,7 +1969,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); @@ -1617,9 +1990,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, int chv_plane_check_rotation(const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = 
to_i915(plane->base.dev); - unsigned int rotation = plane_state->base.rotation; + unsigned int rotation = plane_state->hw.rotation; /* CHV ignores the mirror bit when the rotate bit is set :( */ if (IS_CHERRYVIEW(dev_priv) && @@ -1642,8 +2015,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = drm_atomic_helper_check_plane_state(&plane_state->base, - &crtc_state->base, + ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, + &crtc_state->uapi, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true); @@ -1654,7 +2027,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; ret = intel_plane_check_src_coordinates(plane_state); @@ -1669,10 +2042,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; struct drm_format_name_buf format_name; if (!fb) @@ -1727,12 +2100,13 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, } /* Y-tiling is not supported in IF-ID Interlace mode */ - if (crtc_state->base.enable && - crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && + if (crtc_state->hw.enable && + crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && (fb->modifier == I915_FORMAT_MOD_Y_TILED || fb->modifier == I915_FORMAT_MOD_Yf_TILED || fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) { + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS)) { DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; } @@ -1744,9 +2118,9 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = - to_i915(plane_state->base.plane->dev); - int crtc_x = plane_state->base.dst.x1; - int crtc_w = drm_rect_width(&plane_state->base.dst); + to_i915(plane_state->uapi.plane->dev); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); int pipe_src_w = crtc_state->pipe_src_w; /* @@ -1772,12 +2146,13 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) { - const struct drm_framebuffer *fb = plane_state->base.fb; - unsigned int rotation = plane_state->base.rotation; - int src_w = drm_rect_width(&plane_state->base.src) >> 16; + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; /* Display WA #1106 */ - if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 && + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { DRM_DEBUG_KMS("src width must be multiple of 4 for 
rotated planar YUV\n"); @@ -1787,12 +2162,28 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s return 0; } +static int skl_plane_max_scale(struct drm_i915_private *dev_priv, + const struct drm_framebuffer *fb) +{ + /* + * We don't yet know the final source width nor + * whether we can use the HQ scaler mode. Assume + * the best case. + * FIXME need to properly check this later. + */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || + !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 0x30000 - 1; + else + return 0x20000 - 1; +} + static int skl_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; int min_scale = DRM_PLANE_HELPER_NO_SCALING; int max_scale = DRM_PLANE_HELPER_NO_SCALING; int ret; @@ -1804,11 +2195,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, /* use scaler when colorkey is not required */ if (!plane_state->ckey.flags && intel_fb_scalable(fb)) { min_scale = 1; - max_scale = skl_max_scale(crtc_state, fb->format); + max_scale = skl_plane_max_scale(dev_priv, fb); } - ret = drm_atomic_helper_check_plane_state(&plane_state->base, - &crtc_state->base, + ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, + &crtc_state->uapi, min_scale, max_scale, true, true); if (ret) @@ -1818,7 +2209,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; ret = skl_plane_check_dst_coordinates(crtc_state, plane_state); @@ -1834,8 +2225,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, return ret; /* HW only has 8 bits pixel precision, disable plane if invisible */ - if (!(plane_state->base.alpha >> 8)) - plane_state->base.visible = false; + if (!(plane_state->hw.alpha >> 8)) + plane_state->uapi.visible = false; plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); @@ -1843,6 +2234,15 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, plane_state->color_ctl = glk_plane_color_ctl(crtc_state, plane_state); + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + icl_is_hdr_plane(dev_priv, plane->id)) + /* Enable and use MPEG-2 chroma siting */ + plane_state->cus_ctl = PLANE_CUS_ENABLE | + PLANE_CUS_HPHASE_0 | + PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25; + else + plane_state->cus_ctl = 0; + return 0; } @@ -1854,7 +2254,7 @@ static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) static void intel_plane_set_ckey(struct intel_plane_state *plane_state, const struct drm_intel_sprite_colorkey *set) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct drm_intel_sprite_colorkey *key = &plane_state->ckey; @@ -1979,8 +2379,12 @@ static const u64 i9xx_plane_format_modifiers[] = { }; static const u32 snb_plane_formats[] = { - DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, 
DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -1988,12 +2392,30 @@ static const u32 snb_plane_formats[] = { }; static const u32 vlv_plane_formats[] = { + DRM_FORMAT_C8, DRM_FORMAT_RGB565, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + +static const u32 chv_pipe_b_sprite_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, @@ -2010,6 +2432,8 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2025,6 +2449,8 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2041,6 +2467,8 @@ static const u32 glk_planar_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2060,6 +2488,8 @@ static const u32 icl_sdr_y_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2081,6 +2511,8 @@ static const u32 icl_sdr_uv_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2106,6 +2538,8 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, DRM_FORMAT_XRGB16161616F, DRM_FORMAT_XBGR16161616F, DRM_FORMAT_ARGB16161616F, @@ -2144,7 +2578,8 @@ static const u64 skl_plane_format_modifiers_ccs[] = { DRM_FORMAT_MOD_INVALID }; -static const u64 gen12_plane_format_modifiers_noccs[] = { +static const u64 gen12_plane_format_modifiers_ccs[] = { + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR, @@ -2191,6 +2626,10 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2216,6 +2655,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, } switch (format) { + case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_ARGB8888: @@ -2223,6 +2663,8 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2267,6 +2709,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case 
DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2306,6 +2750,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: break; default: return false; @@ -2316,9 +2761,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: + if (is_ccs_modifier(modifier)) + return true; + /* fall through */ case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2481,7 +2931,7 @@ struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - static const struct drm_plane_funcs *plane_funcs; + const struct drm_plane_funcs *plane_funcs; struct intel_plane *plane; enum drm_plane_type plane_type; unsigned int supported_rotations; @@ -2511,8 +2961,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->disable_plane = skl_disable_plane; plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; - if (icl_is_nv12_y_plane(plane_id)) - plane->update_slave = icl_update_slave; + plane->min_cdclk = skl_plane_min_cdclk; if (INTEL_GEN(dev_priv) >= 11) formats = icl_get_plane_formats(dev_priv, pipe, @@ -2524,13 +2973,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); + plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (INTEL_GEN(dev_priv) >= 12) { - /* TODO: Implement support for gen-12 CCS modifiers */ - plane->has_ccs = false; - modifiers = gen12_plane_format_modifiers_noccs; + modifiers = gen12_plane_format_modifiers_ccs; plane_funcs = &gen12_plane_funcs; } else { - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (plane->has_ccs) modifiers = skl_plane_format_modifiers_ccs; else @@ -2618,9 +3065,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->disable_plane = vlv_disable_plane; plane->get_hw_state = vlv_plane_get_hw_state; plane->check_plane = vlv_sprite_check; + plane->min_cdclk = vlv_plane_min_cdclk; - formats = vlv_plane_formats; - num_formats = ARRAY_SIZE(vlv_plane_formats); + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + formats = chv_pipe_b_sprite_formats; + num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats); + } else { + formats = vlv_plane_formats; + num_formats = ARRAY_SIZE(vlv_plane_formats); + } modifiers = i9xx_plane_format_modifiers; plane_funcs = &vlv_sprite_funcs; @@ -2631,6 +3084,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->get_hw_state = ivb_plane_get_hw_state; plane->check_plane = g4x_sprite_check; + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + plane->min_cdclk = hsw_plane_min_cdclk; + else + plane->min_cdclk = ivb_sprite_min_cdclk; + formats = snb_plane_formats; num_formats = ARRAY_SIZE(snb_plane_formats); modifiers = i9xx_plane_format_modifiers; @@ -2642,6 +3100,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->disable_plane = g4x_disable_plane; plane->get_hw_state = g4x_plane_get_hw_state; plane->check_plane = 
g4x_sprite_check; + plane->min_cdclk = g4x_sprite_min_cdclk; modifiers = i9xx_plane_format_modifiers; if (IS_GEN(dev_priv, 6)) { diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 229336214f68..5eeaa92420d1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -49,4 +49,11 @@ static inline u8 icl_hdr_plane_mask(void) bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id); +int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + #endif /* __INTEL_SPRITE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 70726b481244..50703536436c 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -924,7 +924,7 @@ intel_enable_tv(struct intel_encoder *encoder, /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_wait_for_vblank(dev_priv, - to_intel_crtc(pipe_config->base.crtc)->pipe); + to_intel_crtc(pipe_config->uapi.crtc)->pipe); I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); } @@ -1085,7 +1085,7 @@ intel_tv_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; struct drm_display_mode mode = {}; u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp; struct tv_mode tv_mode = {}; @@ -1188,7 +1188,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, to_intel_tv_connector_state(conn_state); const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; int hdisplay = adjusted_mode->crtc_hdisplay; int vdisplay = adjusted_mode->crtc_vdisplay; @@ -1417,7 +1417,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_tv *intel_tv = enc_to_tv(encoder); const struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); @@ -1701,7 +1701,7 @@ intel_tv_detect(struct drm_connector *connector, struct intel_load_detect_pipe tmp; int ret; - ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx); + ret = intel_get_load_detect_pipe(connector, &tmp, ctx); if (ret < 0) return ret; @@ -1947,7 +1947,7 @@ intel_tv_init(struct drm_i915_private *dev_priv) intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = PORT_NONE; - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B); + intel_encoder->pipe_mask = ~0; intel_encoder->cloneable = 0; intel_tv->type = DRM_MODE_CONNECTOR_Unknown; diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index e3045ced4bfe..4d0c23b29248 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h 
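
The intel_vbt_defs.h hunk below adds the Block 56 (Compression Parameters) layout, whose comment gives the DSC rate-control buffer size as 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes. A minimal sketch of decoding it (hypothetical helper, not part of this patch):

	/* Hypothetical helper: decode the rc buffer size fields of
	 * struct dsc_compression_parameters_entry (defined below).
	 * 4^n * 1024 is the same as 1024 << (2 * n). */
	static unsigned int vbt_dsc_rc_buffer_bytes(unsigned int rc_buffer_block_size,
						    unsigned int rc_buffer_size)
	{
		return (1024u << (2 * rc_buffer_block_size)) * (rc_buffer_size + 1);
	}

For example, VBT_RC_BUFFER_BLOCK_SIZE_4KB (1) with rc_buffer_size == 3 decodes to 16384 bytes.
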
@@ -114,6 +114,8 @@ enum bdb_block_id {
 	BDB_LVDS_POWER = 44,
 	BDB_MIPI_CONFIG = 52,
 	BDB_MIPI_SEQUENCE = 53,
+	BDB_COMPRESSION_PARAMETERS = 56,
+	BDB_GENERIC_DTD = 58,
 	BDB_SKIP = 254, /* VBIOS private block, ignore */
 };
@@ -367,7 +369,7 @@ struct child_device_config {
 	u16 dtd_buf_ptr;			/* 161 */
 	u8 edidless_efp:1;			/* 161 */
 	u8 compression_enable:1;		/* 198 */
-	u8 compression_method:1;		/* 198 */
+	u8 compression_method_cps:1;		/* 198 */
 	u8 ganged_edp:1;			/* 202 */
 	u8 reserved0:4;
 	u8 compression_structure_index:4;	/* 198 */
@@ -792,6 +794,35 @@ struct bdb_lfp_backlight_data {
 } __packed;
 
 /*
+ * Block 44 - LFP Power Conservation Features Block
+ */
+
+struct als_data_entry {
+	u16 backlight_adjust;
+	u16 lux;
+} __packed;
+
+struct agressiveness_profile_entry {
+	u8 dpst_agressiveness : 4;
+	u8 lace_agressiveness : 4;
+} __packed;
+
+struct bdb_lfp_power {
+	u8 lfp_feature_bits;
+	struct als_data_entry als[5];
+	u8 lace_aggressiveness_profile;
+	u16 dpst;
+	u16 psr;
+	u16 drrs;
+	u16 lace_support;
+	u16 adt;
+	u16 dmrrs;
+	u16 adb;
+	u16 lace_enabled_status;
+	struct agressiveness_profile_entry aggressivenes[16];
+} __packed;
+
+/*
  * Block 52 - MIPI Configuration Block
  */
@@ -811,4 +842,85 @@ struct bdb_mipi_sequence {
 	u8 data[0]; /* up to 6 variable length blocks */
 } __packed;
 
+/*
+ * Block 56 - Compression Parameters
+ */
+
+#define VBT_RC_BUFFER_BLOCK_SIZE_1KB	0
+#define VBT_RC_BUFFER_BLOCK_SIZE_4KB	1
+#define VBT_RC_BUFFER_BLOCK_SIZE_16KB	2
+#define VBT_RC_BUFFER_BLOCK_SIZE_64KB	3
+
+#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value)	((vbt_value) + 8) /* bits */
+#define VBT_DSC_MAX_BPP(vbt_value)		(6 + (vbt_value) * 2)
+
+struct dsc_compression_parameters_entry {
+	u8 version_major:4;
+	u8 version_minor:4;
+
+	u8 rc_buffer_block_size:2;
+	u8 reserved1:6;
+
+	/*
+	 * Buffer size in bytes:
+	 *
+	 * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes
+	 */
+	u8 rc_buffer_size;
+	u32 slices_per_line;
+
+	u8 line_buffer_depth:4;
+	u8 reserved2:4;
+
+	/* Flag Bits 1 */
+	u8 block_prediction_enable:1;
+	u8 reserved3:7;
+
+	u8 max_bpp; /* mapping */
+
+	/* Color depth capabilities */
+	u8 reserved4:1;
+	u8 support_8bpc:1;
+	u8 support_10bpc:1;
+	u8 support_12bpc:1;
+	u8 reserved5:4;
+
+	u16 slice_height;
+} __packed;
+
+struct bdb_compression_parameters {
+	u16 entry_size;
+	struct dsc_compression_parameters_entry data[16];
+} __packed;
+
+/*
+ * Block 58 - Generic DTD Block
+ */
+
+struct generic_dtd_entry {
+	u32 pixel_clock;
+	u16 hactive;
+	u16 hblank;
+	u16 hfront_porch;
+	u16 hsync;
+	u16 vactive;
+	u16 vblank;
+	u16 vfront_porch;
+	u16 vsync;
+	u16 width_mm;
+	u16 height_mm;
+
+	/* Flags */
+	u8 rsvd_flags:6;
+	u8 vsync_positive_polarity:1;
+	u8 hsync_positive_polarity:1;
+
+	u8 rsvd[3];
+} __packed;
+
+struct bdb_generic_dtd {
+	u16 gdtd_size;
+	struct generic_dtd_entry dtd[];	/* up to 24 DTD's */
+} __packed;
+
 #endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index d4fb7f16f9f6..6bab08db5d75 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -10,6 +10,7 @@
 
 #include "i915_drv.h"
 #include "intel_display_types.h"
+#include "intel_dsi.h"
 #include "intel_vdsc.h"
 
 enum ROW_INDEX_BPP {
@@ -30,10 +31,8 @@ enum COLUMN_INDEX_BPC {
 	MAX_COLUMN_INDEX
 };
 
-#define DSC_SUPPORTED_VERSION_MIN	1
-
 /* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */
-static u16 rc_buf_thresh[] = {
+static const u16 rc_buf_thresh[] = {
 	896, 1792, 2688, 3584, 4480,
5376, 6272, 6720, 7168, 7616, 7744, 7872, 8000, 8064 }; @@ -53,7 +52,7 @@ struct rc_parameters { * Selected Rate Control Related Parameter Recommended Values * from DSC_v1.11 spec & C Model release: DSC_model_20161212 */ -static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = { +static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = { { /* 6BPP/8BPC */ { 768, 15, 6144, 3, 13, 11, 11, { @@ -319,63 +318,84 @@ static int get_column_index_for_rc_params(u8 bits_per_component) } } -int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config) +static const struct rc_parameters *get_rc_params(u16 compressed_bpp, + u8 bits_per_component) +{ + int row_index, column_index; + + row_index = get_row_index_for_rc_params(compressed_bpp); + if (row_index < 0) + return NULL; + + column_index = get_column_index_for_rc_params(bits_per_component); + if (column_index < 0) + return NULL; + + return &rc_parameters[row_index][column_index]; +} + +bool intel_dsc_source_support(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + enum pipe pipe = crtc->pipe; + + if (!INTEL_INFO(i915)->display.has_dsc) + return false; + + /* On TGL, DSC is supported on all Pipes */ + if (INTEL_GEN(i915) >= 12) + return true; + + if (INTEL_GEN(i915) >= 10 && + (pipe != PIPE_A || + (cpu_transcoder == TRANSCODER_EDP || + cpu_transcoder == TRANSCODER_DSI_0 || + cpu_transcoder == TRANSCODER_DSI_1))) + return true; + + return false; +} + +static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state) { - struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg; - u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp; + const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (INTEL_GEN(i915) >= 12) + return true; + + if (cpu_transcoder == TRANSCODER_EDP || + cpu_transcoder == TRANSCODER_DSI_0 || + cpu_transcoder == TRANSCODER_DSI_1) + return false; + + /* There's no pipe A DSC engine on ICL */ + WARN_ON(crtc->pipe == PIPE_A); + + return true; +} + +int intel_dsc_compute_params(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; + u16 compressed_bpp = pipe_config->dsc.compressed_bpp; + const struct rc_parameters *rc_params; u8 i = 0; - int row_index = 0; - int column_index = 0; - u8 line_buf_depth = 0; - vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay; - vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay; + vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; + vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, - pipe_config->dsc_params.slice_count); - /* - * Slice Height of 8 works for all currently available panels. So start - * with that if pic_height is an integral multiple of 8. - * Eventually add logic to try multiple slice heights. 
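
As a worked example of the slice-height heuristic deleted in this hunk: a 2160-line mode takes slice_height = 8 (2160 % 8 == 0), while a 1082-line mode fails both the % 8 and % 4 tests (remainder 2 in each case) and falls back to slice_height = 2.
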
- */ - if (vdsc_cfg->pic_height % 8 == 0) - vdsc_cfg->slice_height = 8; - else if (vdsc_cfg->pic_height % 4 == 0) - vdsc_cfg->slice_height = 4; - else - vdsc_cfg->slice_height = 2; - - /* Values filled from DSC Sink DPCD */ - vdsc_cfg->dsc_version_major = - (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & - DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; - vdsc_cfg->dsc_version_minor = - min(DSC_SUPPORTED_VERSION_MIN, - (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & - DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); - - vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & - DP_DSC_RGB; - - line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); - if (!line_buf_depth) { - DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); - return -EINVAL; - } - if (vdsc_cfg->dsc_version_minor == 2) - vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? - DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; - else - vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? - DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; + pipe_config->dsc.slice_count); /* Gen 11 does not support YCbCr */ vdsc_cfg->simple_422 = false; /* Gen 11 does not support VBR */ vdsc_cfg->vbr_enable = false; - vdsc_cfg->block_pred_enable = - intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & - DP_DSC_BLK_PREDICTION_IS_SUPPORTED; /* Gen 11 only supports integral values of bpp */ vdsc_cfg->bits_per_pixel = compressed_bpp << 4; @@ -399,39 +419,29 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, vdsc_cfg->rc_buf_thresh[13] = 0x7D; } - row_index = get_row_index_for_rc_params(compressed_bpp); - column_index = - get_column_index_for_rc_params(vdsc_cfg->bits_per_component); - - if (row_index < 0 || column_index < 0) + rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component); + if (!rc_params) return -EINVAL; - vdsc_cfg->first_line_bpg_offset = - rc_params[row_index][column_index].first_line_bpg_offset; - vdsc_cfg->initial_xmit_delay = - rc_params[row_index][column_index].initial_xmit_delay; - vdsc_cfg->initial_offset = - rc_params[row_index][column_index].initial_offset; - vdsc_cfg->flatness_min_qp = - rc_params[row_index][column_index].flatness_min_qp; - vdsc_cfg->flatness_max_qp = - rc_params[row_index][column_index].flatness_max_qp; - vdsc_cfg->rc_quant_incr_limit0 = - rc_params[row_index][column_index].rc_quant_incr_limit0; - vdsc_cfg->rc_quant_incr_limit1 = - rc_params[row_index][column_index].rc_quant_incr_limit1; + vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset; + vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay; + vdsc_cfg->initial_offset = rc_params->initial_offset; + vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp; + vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp; + vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0; + vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1; for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { vdsc_cfg->rc_range_params[i].range_min_qp = - rc_params[row_index][column_index].rc_range_params[i].range_min_qp; + rc_params->rc_range_params[i].range_min_qp; vdsc_cfg->rc_range_params[i].range_max_qp = - rc_params[row_index][column_index].rc_range_params[i].range_max_qp; + rc_params->rc_range_params[i].range_max_qp; /* * Range BPG Offset uses 2's complement and is only a 6 bits. So * mask it to get only 6 bits. 
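
A quick illustration of the 6-bit two's-complement masking described above (values invented; 0x3f stands in for DSC_RANGE_BPG_OFFSET_MASK):

	u8 field = (u8)(-2) & 0x3f;	/* -2 is ...11111110, masked to 0x3e */

The decoder sign-extends the 6-bit field, recovering -2.
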
*/ vdsc_cfg->rc_range_params[i].range_bpg_offset = - rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset & + rc_params->rc_range_params[i].range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } @@ -453,45 +463,46 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); - return drm_dsc_compute_rc_parameters(vdsc_cfg); + return 0; } enum intel_display_power_domain intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; /* - * On ICL VDSC/joining for eDP transcoder uses a separate power well, - * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain. - * For any other transcoder, VDSC/joining uses the power well associated - * with the pipe/transcoder in use. Hence another reference on the - * transcoder power domain will suffice. + * VDSC/joining uses a separate power well, PW2, and requires + * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases: * - * On TGL we have the same mapping, but for transcoder A (the special - * TRANSCODER_EDP is gone). + * - ICL eDP/DSI transcoder + * - TGL pipe A + * + * For any other pipe, VDSC/joining uses the power well associated with + * the pipe in use. Hence another reference on the pipe power domain + * will suffice. (Except no VDSC/joining on ICL pipe A.) */ - if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A) - return POWER_DOMAIN_TRANSCODER_VDSC_PW2; - else if (cpu_transcoder == TRANSCODER_EDP) + if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A) return POWER_DOMAIN_TRANSCODER_VDSC_PW2; + else if (is_pipe_dsc(crtc_state)) + return POWER_DOMAIN_PIPE(pipe); else - return POWER_DOMAIN_TRANSCODER(cpu_transcoder); + return POWER_DOMAIN_TRANSCODER_VDSC_PW2; } -static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static void intel_dsc_pps_configure(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; enum pipe pipe = crtc->pipe; - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 pps_val = 0; u32 rc_buf_thresh_dword[4]; u32 rc_range_params_dword[8]; - u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1; + u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 
2 : 1; int i = 0; /* Populate PICTURE_PARAMETER_SET_0 registers */ @@ -508,17 +519,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; DRM_INFO("PPS0 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), pps_val); } @@ -527,17 +538,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val = 0; pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); DRM_INFO("PPS1 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), pps_val); } @@ -547,17 +558,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); DRM_INFO("PPS2 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), pps_val); } @@ -567,17 +578,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); DRM_INFO("PPS3 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), pps_val); } @@ -587,17 +598,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); DRM_INFO("PPS4 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if 
(crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), pps_val); } @@ -607,17 +618,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); DRM_INFO("PPS5 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), pps_val); } @@ -629,17 +640,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); DRM_INFO("PPS6 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), pps_val); } @@ -649,17 +660,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); DRM_INFO("PPS7 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), pps_val); } @@ -669,17 +680,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); DRM_INFO("PPS8 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), pps_val); } @@ -689,17 +700,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) | 
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); DRM_INFO("PPS9 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), pps_val); } @@ -711,17 +722,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); DRM_INFO("PPS10 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), pps_val); } @@ -734,17 +745,17 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); DRM_INFO("PPS16 = 0x%08x\n", pps_val); - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), pps_val); } @@ -758,12 +769,12 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i, rc_buf_thresh_dword[i / 4]); } - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(DSCC_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW, @@ -782,7 +793,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, rc_buf_thresh_dword[2]); I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), rc_buf_thresh_dword[3]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe), rc_buf_thresh_dword[0]); I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), @@ -807,7 +818,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i, rc_range_params_dword[i / 2]); } - if (cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW, @@ 
-824,7 +835,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, rc_range_params_dword[6]); I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW, rc_range_params_dword[7]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW, @@ -859,7 +870,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, rc_range_params_dword[6]); I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), rc_range_params_dword[7]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), rc_range_params_dword[0]); I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), @@ -880,12 +891,79 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, } } -static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; + u32 dss_ctl1, dss_ctl2, val; + + if (!intel_dsc_source_support(encoder, crtc_state)) + return; + + power_domain = intel_dsc_power_domain(crtc_state); + + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) + return; + + if (!is_pipe_dsc(crtc_state)) { + dss_ctl1 = I915_READ(DSS_CTL1); + dss_ctl2 = I915_READ(DSS_CTL2); + } else { + dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe)); + dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe)); + } + + crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE; + if (!crtc_state->dsc.compression_enable) + goto out; + + crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) && + (dss_ctl1 & JOINER_ENABLE); + + /* FIXME: add more state readout as needed */ + + /* PPS1 */ + if (!is_pipe_dsc(crtc_state)) + val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1); + else + val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); + vdsc_cfg->bits_per_pixel = val; + crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; +out: + intel_display_power_put(dev_priv, power_domain, wakeref); +} + +static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi; + struct drm_dsc_picture_parameter_set pps; + enum port port; + + drm_dsc_pps_payload_pack(&pps, vdsc_cfg); + + for_each_dsi_port(port, intel_dsi->ports) { + dsi = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_picture_parameter_set(dsi, &pps); + mipi_dsi_compression_mode(dsi, true); + } +} + +static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 
2-123 */ @@ -902,25 +980,28 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, void intel_dsc_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1_val = 0; u32 dss_ctl2_val = 0; - if (!crtc_state->dsc_params.compression_enable) + if (!crtc_state->dsc.compression_enable) return; /* Enable Power wells for VDSC/joining */ intel_display_power_get(dev_priv, intel_dsc_power_domain(crtc_state)); - intel_configure_pps_for_dsc_encoder(encoder, crtc_state); + intel_dsc_pps_configure(encoder, crtc_state); - intel_dp_write_dsc_pps_sdp(encoder, crtc_state); + if (encoder->type == INTEL_OUTPUT_DSI) + intel_dsc_dsi_pps_write(encoder, crtc_state); + else + intel_dsc_dp_pps_write(encoder, crtc_state); - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(crtc_state)) { dss_ctl1_reg = DSS_CTL1; dss_ctl2_reg = DSS_CTL2; } else { @@ -928,7 +1009,7 @@ void intel_dsc_enable(struct intel_encoder *encoder, dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); } dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; dss_ctl1_val |= JOINER_ENABLE; } @@ -938,16 +1019,16 @@ void intel_dsc_enable(struct intel_encoder *encoder, void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1_val = 0, dss_ctl2_val = 0; - if (!old_crtc_state->dsc_params.compression_enable) + if (!old_crtc_state->dsc.compression_enable) return; - if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) { + if (!is_pipe_dsc(old_crtc_state)) { dss_ctl1_reg = DSS_CTL1; dss_ctl2_reg = DSS_CTL2; } else { diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 90d3f6017fcb..e56a3254c214 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -6,15 +6,20 @@ #ifndef __INTEL_VDSC_H__ #define __INTEL_VDSC_H__ +#include <linux/types.h> + struct intel_encoder; struct intel_crtc_state; -struct intel_dp; +bool intel_dsc_source_support(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_dsc_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); -int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config); +int intel_dsc_compute_params(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config); +void intel_dsc_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 50064cde0724..21e820299107 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -261,9 +261,9 @@ static int intel_dsi_compute_config(struct 
intel_encoder *encoder, struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; DRM_DEBUG_KMS("\n"); @@ -624,7 +624,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -746,7 +746,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - struct drm_crtc *crtc = pipe_config->base.crtc; + struct drm_crtc *crtc = pipe_config->uapi.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; @@ -882,8 +882,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) } static void intel_dsi_post_disable(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -892,6 +892,12 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, DRM_DEBUG_KMS("\n"); + if (IS_GEN9_LP(dev_priv)) { + intel_crtc_vblank_off(old_crtc_state); + + skylake_scaler_disable(old_crtc_state); + } + if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); @@ -1032,9 +1038,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; + &pipe_config->hw.adjusted_mode; struct drm_display_mode *adjusted_mode_sw; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); unsigned int lane_count = intel_dsi->lane_count; unsigned int bpp, fmt; @@ -1045,7 +1051,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, crtc_hblank_start_sw, crtc_hblank_end_sw; /* FIXME: hw readout should not depend on SW state */ - adjusted_mode_sw = &crtc->config->base.adjusted_mode; + adjusted_mode_sw = &crtc->config->hw.adjusted_mode; /* * Atleast one port is active as encoder->get_config called only if @@ -1204,7 +1210,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, } if (pclk) { - pipe_config->base.adjusted_mode.crtc_clock = pclk; + pipe_config->hw.adjusted_mode.crtc_clock = pclk; pipe_config->port_clock = pclk; } } @@ -1315,9 +1321,9 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, struct 
drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 val, tmp; @@ -1870,11 +1876,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) * port C. BXT isn't limited like this. */ if (IS_GEN9_LP(dev_priv)) - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + intel_encoder->pipe_mask = ~0; else if (port == PORT_A) - intel_encoder->crtc_mask = BIT(PIPE_A); + intel_encoder->pipe_mask = BIT(PIPE_A); else - intel_encoder->crtc_mask = BIT(PIPE_B); + intel_encoder->pipe_mask = BIT(PIPE_B); if (dev_priv->vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index b9f504ba3b32..34be4c0ee7c5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -20,33 +20,31 @@ static void __do_clflush(struct drm_i915_gem_object *obj) { GEM_BUG_ON(!i915_gem_object_has_pages(obj)); drm_clflush_sg(obj->mm.pages); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); } static int clflush_work(struct dma_fence_work *base) { struct clflush *clflush = container_of(base, typeof(*clflush), base); - struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj); + struct drm_i915_gem_object *obj = clflush->obj; int err; err = i915_gem_object_pin_pages(obj); if (err) - goto put; + return err; __do_clflush(obj); i915_gem_object_unpin_pages(obj); -put: - i915_gem_object_put(obj); - return err; + return 0; } static void clflush_release(struct dma_fence_work *base) { struct clflush *clflush = container_of(base, typeof(*clflush), base); - if (clflush->obj) - i915_gem_object_put(clflush->obj); + i915_gem_object_put(clflush->obj); } static const struct dma_fence_work_ops clflush_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 7b01f4605f21..dc90b044a217 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -69,8 +69,12 @@ #include <drm/i915_drm.h> -#include "gt/intel_lrc_reg.h" +#include "gt/intel_context.h" +#include "gt/intel_engine_heartbeat.h" +#include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" +#include "gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" #include "i915_gem_context.h" #include "i915_globals.h" @@ -167,12 +171,80 @@ lookup_user_engine(struct i915_gem_context *ctx, return i915_gem_context_get_engine(ctx, idx); } +static struct i915_address_space * +context_get_vm_rcu(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); + + do { + struct i915_address_space *vm; + + /* + * We do not allow downgrading from full-ppgtt [to a shared + * global gtt], so ctx->vm cannot become NULL. 
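
The loop that follows is an instance of the common RCU lookup-then-kref pattern; a minimal generic sketch (all names invented for illustration):

	/* Sketch: safely acquire a reference to an RCU-protected object
	 * whose slot may be retargeted (and the object freed) concurrently. */
	static struct obj *get_obj_rcu(struct obj __rcu **slot)
	{
		struct obj *o;

		rcu_read_lock();
		do {
			o = rcu_dereference(*slot);
		} while (o && !kref_get_unless_zero(&o->ref));
		rcu_read_unlock();

		return o;
	}

context_get_vm_rcu() additionally re-checks that ctx->vm still points at the vm it just pinned before handing it off, since the allocation could have been recycled for another context in the meantime.
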
+		 */
+		vm = rcu_dereference(ctx->vm);
+		if (!kref_get_unless_zero(&vm->ref))
+			continue;
+
+		/*
+		 * This ppgtt may have be reallocated between
+		 * the read and the kref, and reassigned to a third
+		 * context. In order to avoid inadvertent sharing
+		 * of this ppgtt with that third context (and not
+		 * src), we have to confirm that we have the same
+		 * ppgtt after passing through the strong memory
+		 * barrier implied by a successful
+		 * kref_get_unless_zero().
+		 *
+		 * Once we have acquired the current ppgtt of ctx,
+		 * we no longer care if it is released from ctx, as
+		 * it cannot be reallocated elsewhere.
+		 */
+
+		if (vm == rcu_access_pointer(ctx->vm))
+			return rcu_pointer_handoff(vm);
+
+		i915_vm_put(vm);
+	} while (1);
+}
+
+static void intel_context_set_gem(struct intel_context *ce,
+				  struct i915_gem_context *ctx)
+{
+	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
+	RCU_INIT_POINTER(ce->gem_context, ctx);
+
+	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+		ce->ring = __intel_context_ring_size(SZ_16K);
+
+	if (rcu_access_pointer(ctx->vm)) {
+		struct i915_address_space *vm;
+
+		rcu_read_lock();
+		vm = context_get_vm_rcu(ctx); /* hmm */
+		rcu_read_unlock();
+
+		i915_vm_put(ce->vm);
+		ce->vm = vm;
+	}
+
+	GEM_BUG_ON(ce->timeline);
+	if (ctx->timeline)
+		ce->timeline = intel_timeline_get(ctx->timeline);
+
+	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+	    intel_engine_has_semaphores(ce->engine))
+		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
 	while (count--) {
 		if (!e->engines[count])
 			continue;
 
+		RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
 		intel_context_put(e->engines[count]);
 	}
 	kfree(e);
@@ -209,12 +281,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(e, e->num_engines + 1);
 			return ERR_CAST(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		e->engines[engine->legacy_idx] = ce;
 		e->num_engines = max(e->num_engines, engine->legacy_idx);
 	}
@@ -237,9 +311,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);
 
-	kfree(ctx->name);
 	put_pid(ctx->pid);
 
-	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
@@ -276,11 +348,170 @@ void i915_gem_context_release(struct kref *ref)
 		schedule_work(&gc->free_work);
 }
 
+static inline struct i915_gem_engines *
+__context_engines_static(const struct i915_gem_context *ctx)
+{
+	return rcu_dereference_protected(ctx->engines, true);
+}
+
+static bool __reset_engine(struct intel_engine_cs *engine)
+{
+	struct intel_gt *gt = engine->gt;
+	bool success = false;
+
+	if (!intel_has_reset_engine(gt))
+		return false;
+
+	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+			      &gt->reset.flags)) {
+		success = intel_engine_reset(engine, NULL) == 0;
+		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+				      &gt->reset.flags);
+	}
+
+	return success;
+}
+
+static void __reset_context(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine)
+{
+	intel_gt_handle_error(engine->gt, engine->mask, 0,
+			      "context closure in %s", ctx->name);
+}
+
+static bool __cancel_engine(struct intel_engine_cs *engine)
+{
+	/*
+	 * Send a "high priority pulse" down the engine to cause the
+	 * current request to be momentarily preempted. (If it fails to
+	 * be preempted, it will be reset). As we have marked our context
+	 * as banned, any incomplete request, including any running, will
+	 * be skipped following the preemption.
+	 *
+	 * If there is no hangchecking (one of the reasons why we try to
+	 * cancel the context) and no forced preemption, there may be no
+	 * means by which we reset the GPU and evict the persistent hog.
+	 * Ergo if we are unable to inject a preemptive pulse that can
+	 * kill the banned context, we fallback to doing a local reset
+	 * instead.
+	 */
+	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+	    !intel_engine_pulse(engine))
+		return true;
+
+	/* If we are unable to send a pulse, try resetting this engine. */
+	return __reset_engine(engine);
+}
+
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+{
+	struct intel_engine_cs *engine, *locked;
+
+	/*
+	 * Serialise with __i915_request_submit() so that it sees
+	 * is-banned?, or we know the request is already inflight.
+	 */
+	locked = READ_ONCE(rq->engine);
+	spin_lock_irq(&locked->active.lock);
+	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+		spin_unlock(&locked->active.lock);
+		spin_lock(&engine->active.lock);
+		locked = engine;
+	}
+
+	engine = NULL;
+	if (i915_request_is_active(rq) && !rq->fence.error)
+		engine = rq->engine;
+
+	spin_unlock_irq(&locked->active.lock);
+
+	return engine;
+}
+
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+	struct intel_engine_cs *engine = NULL;
+	struct i915_request *rq;
+
+	if (!ce->timeline)
+		return NULL;
+
+	mutex_lock(&ce->timeline->mutex);
+	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+		if (i915_request_completed(rq))
+			break;
+
+		/* Check with the backend if the request is inflight */
+		engine = __active_engine(rq);
+		if (engine)
+			break;
+	}
+	mutex_unlock(&ce->timeline->mutex);
+
+	return engine;
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+
+	/*
+	 * Map the user's engine back to the actual engines; one virtual
+	 * engine will be mapped to multiple engines, and using ctx->engine[]
+	 * the same engine may be have multiple instances in the user's map.
+	 * However, we only care about pending requests, so only include
+	 * engines on which there are incomplete requests.
+	 */
+	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+		struct intel_engine_cs *engine;
+
+		if (intel_context_set_banned(ce))
+			continue;
+
+		/*
+		 * Check the current active state of this context; if we
+		 * are currently executing on the GPU we need to evict
+		 * ourselves. On the other hand, if we haven't yet been
+		 * submitted to the GPU or if everything is complete,
+		 * we have nothing to do.
+		 */
+		engine = active_engine(ce);
+
+		/* First attempt to gracefully cancel the context */
+		if (engine && !__cancel_engine(engine))
+			/*
+			 * If we are unable to send a preemptive pulse to bump
+			 * the context from the GPU, we have to resort to a full
+			 * reset. We hope the collateral damage is worth it.
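
This kill-on-close path pairs with the new persistence control handled by set_persistence() later in this file: clearing it asks the kernel to cancel outstanding requests when the context is closed. A hedged userspace sketch (ctx_id and drm_fd assumed valid; the parameter name comes from the uAPI added alongside this series):

	struct drm_i915_gem_context_param arg = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PERSISTENCE,
		.value = 0,	/* requests are cancelled when the context closes */
	};
	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
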
+ */ + __reset_context(ctx, engine); + } +} + +static void set_closed_name(struct i915_gem_context *ctx) +{ + char *s; + + /* Replace '[]' with '<>' to indicate closed in debug prints */ + + s = strrchr(ctx->name, '['); + if (!s) + return; + + *s = '<'; + + s = strchr(s + 1, ']'); + if (s) + *s = '>'; +} + static void context_close(struct i915_gem_context *ctx) { struct i915_address_space *vm; i915_gem_context_set_closed(ctx); + set_closed_name(ctx); mutex_lock(&ctx->mutex); @@ -298,9 +529,47 @@ static void context_close(struct i915_gem_context *ctx) lut_close(ctx); mutex_unlock(&ctx->mutex); + + /* + * If the user has disabled hangchecking, we can not be sure that + * the batches will ever complete after the context is closed, + * keeping the context and all resources pinned forever. So in this + * case we opt to forcibly kill off all remaining requests on + * context close. + */ + if (!i915_gem_context_is_persistent(ctx) || + !i915_modparams.enable_hangcheck) + kill_context(ctx); + i915_gem_context_put(ctx); } +static int __context_set_persistence(struct i915_gem_context *ctx, bool state) +{ + if (i915_gem_context_is_persistent(ctx) == state) + return 0; + + if (state) { + /* + * Only contexts that are short-lived [that will expire or be + * reset] are allowed to survive past termination. We require + * hangcheck to ensure that the persistent requests are healthy. + */ + if (!i915_modparams.enable_hangcheck) + return -EINVAL; + + i915_gem_context_set_persistence(ctx); + } else { + /* To cancel a context we use "preempt-to-idle" */ + if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + return -ENODEV; + + i915_gem_context_clear_persistence(ctx); + } + + return 0; +} + static struct i915_gem_context * __create_context(struct drm_i915_private *i915) { @@ -335,6 +604,7 @@ __create_context(struct drm_i915_private *i915) i915_gem_context_set_bannable(ctx); i915_gem_context_set_recoverable(ctx); + __context_set_persistence(ctx, true /* cgroup hook? 
*/); for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; @@ -468,36 +738,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) return ctx; } -static void -destroy_kernel_context(struct i915_gem_context **ctxp) -{ - struct i915_gem_context *ctx; - - /* Keep the context ref so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(ctxp)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - - context_close(ctx); - i915_gem_context_free(ctx); -} - -struct i915_gem_context * -i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) -{ - struct i915_gem_context *ctx; - - ctx = i915_gem_create_context(i915, 0); - if (IS_ERR(ctx)) - return ctx; - - i915_gem_context_clear_bannable(ctx); - ctx->sched.priority = I915_USER_PRIORITY(prio); - - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - - return ctx; -} - static void init_contexts(struct i915_gem_contexts *gc) { spin_lock_init(&gc->lock); @@ -507,32 +747,17 @@ static void init_contexts(struct i915_gem_contexts *gc) init_llist_head(&gc->free_list); } -int i915_gem_init_contexts(struct drm_i915_private *i915) +void i915_gem_init__contexts(struct drm_i915_private *i915) { - struct i915_gem_context *ctx; - - /* Reassure ourselves we are only called once */ - GEM_BUG_ON(i915->kernel_context); - init_contexts(&i915->gem.contexts); - - /* lowest priority; idle task */ - ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN); - if (IS_ERR(ctx)) { - DRM_ERROR("Failed to create default global context\n"); - return PTR_ERR(ctx); - } - i915->kernel_context = ctx; - DRM_DEBUG_DRIVER("%s context support initialized\n", DRIVER_CAPS(i915)->has_logical_contexts ? "logical" : "fake"); - return 0; } void i915_gem_driver_release__contexts(struct drm_i915_private *i915) { - destroy_kernel_context(&i915->kernel_context); + flush_work(&i915->gem.contexts.free_work); } static int context_idr_cleanup(int id, void *p, void *data) @@ -562,12 +787,8 @@ static int gem_context_register(struct i915_gem_context *ctx, mutex_unlock(&ctx->mutex); ctx->pid = get_task_pid(current, PIDTYPE_PID); - ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", - current->comm, pid_nr(ctx->pid)); - if (!ctx->name) { - ret = -ENOMEM; - goto err_pid; - } + snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", + current->comm, pid_nr(ctx->pid)); /* And finally expose ourselves to userspace via the idr */ mutex_lock(&fpriv->context_idr_lock); @@ -576,8 +797,6 @@ static int gem_context_register(struct i915_gem_context *ctx, if (ret >= 0) goto out; - kfree(fetch_and_zero(&ctx->name)); -err_pid: put_pid(fetch_and_zero(&ctx->pid)); out: return ret; @@ -606,7 +825,6 @@ int i915_gem_context_open(struct drm_i915_private *i915, if (err < 0) goto err_ctx; - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); GEM_BUG_ON(err > 0); return 0; @@ -817,7 +1035,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, return -ENODEV; rcu_read_lock(); - vm = i915_vm_get(ctx->vm); + vm = context_get_vm_rcu(ctx); rcu_read_unlock(); ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); @@ -854,7 +1072,7 @@ static void set_ppgtt_barrier(void *data) static int emit_ppgtt_update(struct i915_request *rq, void *data) { - struct i915_address_space *vm = rq->hw_context->vm; + struct i915_address_space *vm = rq->context->vm; struct intel_engine_cs *engine = rq->engine; u32 base = engine->mmio_base; u32 *cs; @@ -901,9 +1119,6 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) } 
*cs++ = MI_NOOP; intel_ring_advance(rq, cs); - } else { - /* ppGTT is not part of the legacy context image */ - gen6_ppgtt_pin(i915_vm_to_ppgtt(vm)); } return 0; @@ -911,10 +1126,20 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) static bool skip_ppgtt_update(struct intel_context *ce, void *data) { + if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) + return true; + if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) - return !ce->state; - else - return !atomic_read(&ce->pin_count); + return false; + + if (!atomic_read(&ce->pin_count)) + return true; + + /* ppGTT is not part of the legacy context image */ + if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) + return true; + + return false; } static int set_ppgtt(struct drm_i915_file_private *file_priv, @@ -947,7 +1172,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, if (i915_gem_context_is_closed(ctx)) { err = -ENOENT; - goto out; + goto unlock; } if (vm == rcu_access_pointer(ctx->vm)) @@ -1022,7 +1247,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) if (!intel_context_is_pinned(ce)) return 0; - rq = i915_request_create(ce->engine->kernel_context); + rq = intel_engine_create_kernel_request(ce->engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -1290,12 +1515,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data) } } - ce = intel_execlists_create_virtual(set->ctx, siblings, n); + ce = intel_execlists_create_virtual(siblings, n); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out_siblings; } + intel_context_set_gem(ce, set->ctx); + if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { intel_context_put(ce); err = -EEXIST; @@ -1465,12 +1692,14 @@ set_engines(struct i915_gem_context *ctx, return -ENOENT; } - ce = intel_context_create(ctx, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { __free_engines(set.engines, n); return PTR_ERR(ce); } + intel_context_set_gem(ce, ctx); + set.engines->engines[n] = ce; } set.engines->num_engines = num_engines; @@ -1492,7 +1721,7 @@ replace: i915_gem_context_set_user_engines(ctx); else i915_gem_context_clear_user_engines(ctx); - rcu_swap_protected(ctx->engines, set.engines, 1); + set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); mutex_unlock(&ctx->engines_mutex); call_rcu(&set.engines->rcu, free_engines_rcu); @@ -1601,6 +1830,54 @@ err_free: return err; } +static int +set_persistence(struct i915_gem_context *ctx, + const struct drm_i915_gem_context_param *args) +{ + if (args->size) + return -EINVAL; + + return __context_set_persistence(ctx, args->value); +} + +static void __apply_priority(struct intel_context *ce, void *arg) +{ + struct i915_gem_context *ctx = arg; + + if (!intel_engine_has_semaphores(ce->engine)) + return; + + if (ctx->sched.priority >= I915_PRIORITY_NORMAL) + intel_context_set_use_semaphores(ce); + else + intel_context_clear_use_semaphores(ce); +} + +static int set_priority(struct i915_gem_context *ctx, + const struct drm_i915_gem_context_param *args) +{ + s64 priority = args->value; + + if (args->size) + return -EINVAL; + + if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + return -ENODEV; + + if (priority > I915_CONTEXT_MAX_USER_PRIORITY || + priority < I915_CONTEXT_MIN_USER_PRIORITY) + return -EINVAL; + + if (priority > I915_CONTEXT_DEFAULT_PRIORITY && + !capable(CAP_SYS_NICE)) + return -EPERM; + + ctx->sched.priority = I915_USER_PRIORITY(priority); + context_apply_all(ctx, __apply_priority, ctx); + + return 0; +} + static int ctx_setparam(struct drm_i915_file_private 
*fpriv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) @@ -1647,23 +1924,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, break; case I915_CONTEXT_PARAM_PRIORITY: - { - s64 priority = args->value; - - if (args->size) - ret = -EINVAL; - else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) - ret = -ENODEV; - else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || - priority < I915_CONTEXT_MIN_USER_PRIORITY) - ret = -EINVAL; - else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && - !capable(CAP_SYS_NICE)) - ret = -EPERM; - else - ctx->sched.priority = - I915_USER_PRIORITY(priority); - } + ret = set_priority(ctx, args); break; case I915_CONTEXT_PARAM_SSEU: @@ -1678,6 +1939,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = set_engines(ctx, args); break; + case I915_CONTEXT_PARAM_PERSISTENCE: + ret = set_persistence(ctx, args); + break; + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; @@ -1739,20 +2004,23 @@ static int clone_engines(struct i915_gem_context *dst, */ if (intel_engine_is_virtual(engine)) clone->engines[n] = - intel_execlists_clone_virtual(dst, engine); + intel_execlists_clone_virtual(engine); else - clone->engines[n] = intel_context_create(dst, engine); + clone->engines[n] = intel_context_create(engine); if (IS_ERR_OR_NULL(clone->engines[n])) { __free_engines(clone, n); goto err_unlock; } + + intel_context_set_gem(clone->engines[n], dst); } clone->num_engines = n; user_engines = i915_gem_context_user_engines(src); i915_gem_context_unlock_engines(src); - free_engines(dst->engines); + /* Serialised by constructor */ + free_engines(__context_engines_static(dst)); RCU_INIT_POINTER(dst->engines, clone); if (user_engines) i915_gem_context_set_user_engines(dst); @@ -1787,7 +2055,8 @@ static int clone_sseu(struct i915_gem_context *dst, unsigned long n; int err; - clone = dst->engines; /* no locking required; sole access */ + /* no locking required; sole access under constructor */ + clone = __context_engines_static(dst); if (e->num_engines != clone->num_engines) { err = -EINVAL; goto unlock; } @@ -1832,47 +2101,21 @@ static int clone_vm(struct i915_gem_context *dst, struct i915_address_space *vm; int err = 0; - rcu_read_lock(); - do { - vm = rcu_dereference(src->vm); - if (!vm) - break; - - if (!kref_get_unless_zero(&vm->ref)) - continue; - - /* - * This ppgtt may have been reallocated between - * the read and the kref, and reassigned to a third - * context. In order to avoid inadvertent sharing - * of this ppgtt with that third context (and not - * src), we have to confirm that we have the same - * ppgtt after passing through the strong memory - * barrier implied by a successful - * kref_get_unless_zero(). - * - * Once we have acquired the current ppgtt of src, - * we no longer care if it is released from src, as - * it cannot be reallocated elsewhere.
- */ - - if (vm == rcu_access_pointer(src->vm)) - break; + if (!rcu_access_pointer(src->vm)) + return 0; - i915_vm_put(vm); - } while (1); + rcu_read_lock(); + vm = context_get_vm_rcu(src); rcu_read_unlock(); - if (vm) { - if (!mutex_lock_interruptible(&dst->mutex)) { - __assign_ppgtt(dst, vm); - mutex_unlock(&dst->mutex); - } else { - err = -EINTR; - } - i915_vm_put(vm); + if (!mutex_lock_interruptible(&dst->mutex)) { + __assign_ppgtt(dst, vm); + mutex_unlock(&dst->mutex); + } else { + err = -EINTR; } + i915_vm_put(vm); return err; } @@ -1958,8 +2201,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, ext_data.fpriv = file->driver_priv; if (client_is_banned(ext_data.fpriv)) { DRM_DEBUG("client %s[%d] banned from creating ctx\n", - current->comm, - pid_nr(get_task_pid(current, PIDTYPE_PID))); + current->comm, task_pid_nr(current)); return -EIO; } @@ -2130,6 +2372,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, ret = get_engines(ctx, args); break; + case I915_CONTEXT_PARAM_PERSISTENCE: + args->size = 0; + args->value = i915_gem_context_is_persistent(ctx); + break; + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index cfe80590f0ed..14f3cc1b7583 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -76,24 +76,19 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); } -static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) +static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx) { - return test_bit(CONTEXT_BANNED, &ctx->flags); + return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags); } -static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx) +static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx) { - set_bit(CONTEXT_BANNED, &ctx->flags); + set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags); } -static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx) +static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx) { - return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); -} - -static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx) -{ - __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); + clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags); } static inline bool @@ -114,31 +109,8 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) clear_bit(CONTEXT_USER_ENGINES, &ctx->flags); } -static inline bool -i915_gem_context_nopreempt(const struct i915_gem_context *ctx) -{ - return test_bit(CONTEXT_NOPREEMPT, &ctx->flags); -} - -static inline void -i915_gem_context_set_nopreempt(struct i915_gem_context *ctx) -{ - set_bit(CONTEXT_NOPREEMPT, &ctx->flags); -} - -static inline void -i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx) -{ - clear_bit(CONTEXT_NOPREEMPT, &ctx->flags); -} - -static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) -{ - return !ctx->file_priv; -} - /* i915_gem_context.c */ -int __must_check i915_gem_init_contexts(struct drm_i915_private *i915); +void i915_gem_init__contexts(struct drm_i915_private *i915); void i915_gem_driver_release__contexts(struct drm_i915_private *i915); int i915_gem_context_open(struct drm_i915_private 
*i915, @@ -163,9 +135,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -struct i915_gem_context * -i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio); - static inline struct i915_gem_context * i915_gem_context_get(struct i915_gem_context *ctx) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index fe97b8ba4fda..017ca803ab47 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -100,15 +100,6 @@ struct i915_gem_context { */ struct pid *pid; - /** - * @name: arbitrary name - * - * A name is constructed for the context from the creator's process - * name, pid and user handle in order to uniquely identify the - * context in messages. - */ - const char *name; - /** link: place with &drm_i915_private.context_list */ struct list_head link; struct llist_node free_link; @@ -137,16 +128,14 @@ struct i915_gem_context { #define UCONTEXT_NO_ERROR_CAPTURE 1 #define UCONTEXT_BANNABLE 2 #define UCONTEXT_RECOVERABLE 3 +#define UCONTEXT_PERSISTENCE 4 /** * @flags: small set of booleans */ unsigned long flags; -#define CONTEXT_BANNED 0 -#define CONTEXT_CLOSED 1 -#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 -#define CONTEXT_USER_ENGINES 3 -#define CONTEXT_NOPREEMPT 4 +#define CONTEXT_CLOSED 0 +#define CONTEXT_USER_ENGINES 1 struct mutex mutex; @@ -175,6 +164,15 @@ struct i915_gem_context { * per vm, which may be one per context or shared with the global GTT) */ struct radix_tree_root handles_vma; + + /** + * @name: arbitrary name, used for user debug + * + * A name is constructed for the context from the creator's process + * name, pid and user handle in order to uniquely identify the + * context in messages. 
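Taken together, the new UCONTEXT_PERSISTENCE bit and the set_persistence()/getparam plumbing above form the uAPI for opting a context out of persistence. A hedged userspace sketch of driving it; the wrapper itself is hypothetical, while the ioctl, struct and param are the stock i915 uAPI names:

#include <errno.h>
#include <stdbool.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: opt a context out of persistence so that closing
 * it cancels any in-flight requests (see kill_context() above). */
static int set_context_persistence(int fd, __u32 ctx_id, bool persist)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,			/* must be zero */
		.param = I915_CONTEXT_PARAM_PERSISTENCE,
		.value = persist,
	};

	/* Enabling persistence requires hangcheck (-EINVAL otherwise);
	 * disabling it requires preempt-to-idle (-ENODEV otherwise). */
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p) ? -errno : 0;
}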
+ */ + char name[TASK_COMM_LEN + 8]; }; #endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index da010c45cb30..372b57ca0efc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -220,6 +220,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { + static struct lock_class_key lock_class; struct dma_buf_attachment *attach; struct drm_i915_gem_object *obj; int ret; @@ -251,7 +252,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, } drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class); obj->base.import_attach = attach; obj->base.resv = dma_buf->resv; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 9937b4c341f1..0cc40e77bbd2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -12,6 +12,8 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" #include "i915_vma.h" +#include "i915_gem_lmem.h" +#include "i915_gem_mman.h" static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) { @@ -148,9 +150,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->read_domains |= I915_GEM_DOMAIN_GTT; if (write) { + struct i915_vma *vma; + obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = I915_GEM_DOMAIN_GTT; obj->mm.dirty = true; + + spin_lock(&obj->vma.lock); + for_each_ggtt_vma(vma, obj) + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) + i915_vma_set_ggtt_write(vma); + spin_unlock(&obj->vma.lock); } i915_gem_object_unpin_pages(obj); @@ -175,138 +185,34 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { - struct i915_vma *vma; int ret; - assert_object_held(obj); - if (obj->cache_level == cache_level) return 0; - /* Inspect the list of currently bound VMA and unbind any that would - * be invalid given the new cache-level. This is principally to - * catch the issue of the CS prefetch crossing page boundaries and - * reading an invalid PTE on older architectures. - */ -restart: - list_for_each_entry(vma, &obj->vma.list, obj_link) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - if (i915_vma_is_pinned(vma)) { - DRM_DEBUG("can not change the cache level of pinned objects\n"); - return -EBUSY; - } - - if (!i915_vma_is_closed(vma) && - i915_gem_valid_gtt_space(vma, cache_level)) - continue; - - ret = i915_vma_unbind(vma); - if (ret) - return ret; - - /* As unbinding may affect other elements in the - * obj->vma_list (due to side-effects from retiring - * an active vma), play safe and restart the iterator. - */ - goto restart; - } + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; - /* We can reuse the existing drm_mm nodes but need to change the - * cache-level on the PTE. We could simply unbind them all and - * rebind with the correct cache-level on next use. 
However since - * we already have a valid slot, dma mapping, pages etc, we may as well - * rewrite the PTE in the belief that doing so tramples upon less - * state and so involves less work. - */ - if (atomic_read(&obj->bind_count)) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); + ret = i915_gem_object_lock_interruptible(obj); + if (ret) + return ret; - /* Before we change the PTE, the GPU must not be accessing it. - * If we wait upon the object, we know that all the bound - * VMA are no longer active. - */ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) { - intel_wakeref_t wakeref = - intel_runtime_pm_get(&i915->runtime_pm); - - /* - * Access to snoopable pages through the GTT is - * incoherent and on some machines causes a hard - * lockup. Relinquish the CPU mmapping to force - * userspace to refault in the pages and we can - * then double check if the GTT mapping is still - * valid for that pointer access. - */ - ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex); - if (ret) { - intel_runtime_pm_put(&i915->runtime_pm, - wakeref); - return ret; - } - - if (obj->userfault_count) - __i915_gem_object_release_mmap(obj); - - /* - * As we no longer need a fence for GTT access, - * we can relinquish it now (and so prevent having - * to steal a fence from someone else on the next - * fence request). Note GPU activity would have - * dropped the fence as all snoopable access is - * supposed to be linear. - */ - for_each_ggtt_vma(vma, obj) { - ret = i915_vma_revoke_fence(vma); - if (ret) - break; - } - mutex_unlock(&i915->ggtt.vm.mutex); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - if (ret) - return ret; - } else { - /* - * We either have incoherent backing store and - * so no GTT access or the architecture is fully - * coherent. In such cases, existing GTT mmaps - * ignore the cache bit in the PTE and we can - * rewrite it without confusing the GPU or having - * to force userspace to fault back in its mmaps. - */ - } - - list_for_each_entry(vma, &obj->vma.list, obj_link) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - /* Wait for an earlier async bind, need to rewrite it */ - ret = i915_vma_sync(vma); - if (ret) - return ret; - - ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL); - if (ret) - return ret; - } + /* Always invalidate stale cachelines */ + if (obj->cache_level != cache_level) { + i915_gem_object_set_cache_coherency(obj, cache_level); + obj->cache_dirty = true; + } - list_for_each_entry(vma, &obj->vma.list, obj_link) { - if (i915_vm_has_cache_coloring(vma->vm)) - vma->node.color = cache_level; - } - i915_gem_object_set_cache_coherency(obj, cache_level); - obj->cache_dirty = true; /* Always invalidate stale cachelines */ + i915_gem_object_unlock(obj); - return 0; + /* The cache-level will be applied when each vma is rebound.
*/ + return i915_gem_object_unbind(obj, + I915_GEM_OBJECT_UNBIND_ACTIVE | + I915_GEM_OBJECT_UNBIND_BARRIER); } int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, @@ -387,20 +293,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, goto out; } - if (obj->cache_level == level) - goto out; - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (ret) - goto out; - - ret = i915_gem_object_lock_interruptible(obj); - if (ret == 0) { - ret = i915_gem_object_set_cache_level(obj, level); - i915_gem_object_unlock(obj); - } + ret = i915_gem_object_set_cache_level(obj, level); out: i915_gem_object_put(obj); @@ -419,10 +312,13 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, unsigned int flags) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; int ret; - assert_object_held(obj); + /* Frame buffer must be in LMEM (no migration yet) */ + if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) + return ERR_PTR(-EINVAL); /* * The display engine is not coherent with the LLC cache on gen6. As @@ -435,7 +331,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * with that bit in the PTE to main memory with just one PIPE_CONTROL. */ ret = i915_gem_object_set_cache_level(obj, - HAS_WT(to_i915(obj->base.dev)) ? + HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE); if (ret) return ERR_PTR(ret); @@ -462,13 +358,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - __i915_gem_object_flush_for_display(obj); - - /* - * It should now be out of any other write domains, and we can update - * the domain values for our changes. 
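For reference, the rework above is exercised from userspace through the SET_CACHING ioctl; the wait, lock and deferred rebind now all happen inside i915_gem_object_set_cache_level(). A hedged sketch, with only the helper name being hypothetical:

#include <errno.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: request a new caching mode for a GEM object. */
static int gem_set_caching(int fd, __u32 handle, __u32 mode)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = mode,	/* e.g. I915_CACHING_CACHED */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) ? -errno : 0;
}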
- */ - obj->read_domains |= I915_GEM_DOMAIN_GTT; + i915_gem_object_flush_if_display(obj); return vma; } @@ -479,8 +369,11 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) struct i915_vma *vma; GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + if (!atomic_read(&obj->bind_count)) + return; mutex_lock(&i915->ggtt.vm.mutex); + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -488,6 +381,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) GEM_BUG_ON(vma->vm != &i915->ggtt.vm); list_move_tail(&vma->vm_link, &vma->vm->bound_list); } + spin_unlock(&obj->vma.lock); mutex_unlock(&i915->ggtt.vm.mutex); if (i915_gem_object_is_shrinkable(obj)) { @@ -664,7 +558,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, i915_gem_object_unlock(obj); if (write_domain) - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); out_unpin: i915_gem_object_unpin_pages(obj); @@ -784,7 +678,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, } out: - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); obj->mm.dirty = true; /* return with the pages pinned */ return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index e96901888323..cbd2bcade3c8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -19,11 +19,13 @@ #include "gt/intel_engine_pool.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_ioctls.h" +#include "i915_sw_fence_work.h" #include "i915_trace.h" enum { @@ -227,6 +229,7 @@ struct i915_execbuffer { struct i915_request *request; /** our request to build */ struct i915_vma *batch; /** identity of the batch obj/vma */ + struct i915_vma *trampoline; /** trampoline used for chaining */ /** actual size of execobj[] as we may extend it for the cmdparser */ unsigned int buffer_count; @@ -252,7 +255,6 @@ struct i915_execbuffer { bool has_fence : 1; bool needs_unfenced : 1; - struct intel_context *ce; struct i915_request *rq; u32 *rq_cmd; unsigned int rq_size; @@ -276,28 +278,11 @@ struct i915_execbuffer { #define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags]) -/* - * Used to convert any address to canonical form. - * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS, - * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the - * addresses to be in a canonical form: - * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct - * canonical form [63:48] == [47]." 
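The canonical-form rule quoted above is plain sign extension from bit 47. The helper being removed here is restated below purely as a worked example of that rule, not as new driver code:

#include <linux/bits.h>
#include <linux/bitops.h>

#define GEN8_HIGH_ADDRESS_BIT 47

/* Worked example: 0x0000800000000000 has bit 47 set, so canonicalising
 * replicates that bit upwards, yielding 0xffff800000000000; an address
 * with bit 47 clear passes through unchanged. The inverse simply masks
 * the address back down to bits [47:0]. */
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}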
- */ -#define GEN8_HIGH_ADDRESS_BIT 47 -static inline u64 gen8_canonical_addr(u64 address) -{ - return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT); -} - -static inline u64 gen8_noncanonical_addr(u64 address) -{ - return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); -} - static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { - return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len; + return intel_engine_requires_cmd_parser(eb->engine) || + (intel_engine_using_cmd_parser(eb->engine) && + eb->args->batch_len); } static int eb_create(struct i915_execbuffer *eb) @@ -745,9 +730,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) unsigned int i, batch; int err; - if (unlikely(i915_gem_context_is_banned(eb->gem_context))) - return -EIO; - INIT_LIST_HEAD(&eb->relocs); INIT_LIST_HEAD(&eb->unbound); @@ -883,9 +865,6 @@ static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); - if (eb->reloc_cache.ce) - intel_context_put(eb->reloc_cache.ce); - if (eb->lut_size > 0) kfree(eb->buckets); } @@ -909,7 +888,6 @@ static void reloc_cache_init(struct reloc_cache *cache, cache->has_fence = cache->gen < 4; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache->node.flags = 0; - cache->ce = NULL; cache->rq = NULL; cache->rq_size = 0; } @@ -1179,7 +1157,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (err) goto err_unmap; - rq = intel_context_create_request(cache->ce); + rq = i915_request_create(eb->context); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -1243,36 +1221,9 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (unlikely(!cache->rq)) { int err; - /* If we need to copy for the cmdparser, we will stall anyway */ - if (eb_use_cmdparser(eb)) - return ERR_PTR(-EWOULDBLOCK); - if (!intel_engine_can_store_dword(eb->engine)) return ERR_PTR(-ENODEV); - if (!cache->ce) { - struct intel_context *ce; - - /* - * The CS pre-parser can pre-fetch commands across - * memory sync points and starting gen12 it is able to - * pre-fetch across BB_START and BB_END boundaries - * (within the same context). We therefore use a - * separate context gen12+ to guarantee that the reloc - * writes land before the parser gets to the target - * memory location. 
- */ - if (cache->gen >= 12) - ce = intel_context_create(eb->context->gem_context, - eb->engine); - else - ce = intel_context_get(eb->context); - if (IS_ERR(ce)) - return ERR_CAST(ce); - - cache->ce = ce; - } - err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); @@ -1940,15 +1891,15 @@ err_skip: return err; } -static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) +static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) { if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) - return false; + return -EINVAL; /* Kernel clipping was a DRI1 misfeature */ if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) { if (exec->num_cliprects || exec->cliprects_ptr) - return false; + return -EINVAL; } if (exec->DR4 == 0xffffffff) { @@ -1956,12 +1907,12 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) exec->DR4 = 0; } if (exec->DR1 || exec->DR4) - return false; + return -EINVAL; if ((exec->batch_start_offset | exec->batch_len) & 0x7) - return false; + return -EINVAL; - return true; + return 0; } static int i915_reset_gen7_sol_offsets(struct i915_request *rq) @@ -1989,46 +1940,180 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) return 0; } -static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) +static struct i915_vma * +shadow_batch_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + unsigned int flags) { - struct intel_engine_pool_node *pool; struct i915_vma *vma; int err; - pool = intel_engine_get_pool(eb->engine, eb->batch_len); + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + return vma; + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + return ERR_PTR(err); + + return vma; +} + +struct eb_parse_work { + struct dma_fence_work base; + struct intel_engine_cs *engine; + struct i915_vma *batch; + struct i915_vma *shadow; + struct i915_vma *trampoline; + unsigned int batch_offset; + unsigned int batch_length; +}; + +static int __eb_parse(struct dma_fence_work *work) +{ + struct eb_parse_work *pw = container_of(work, typeof(*pw), base); + + return intel_engine_cmd_parser(pw->engine, + pw->batch, + pw->batch_offset, + pw->batch_length, + pw->shadow, + pw->trampoline); +} + +static const struct dma_fence_work_ops eb_parse_ops = { + .name = "eb_parse", + .work = __eb_parse, +}; + +static int eb_parse_pipeline(struct i915_execbuffer *eb, + struct i915_vma *shadow, + struct i915_vma *trampoline) +{ + struct eb_parse_work *pw; + int err; + + pw = kzalloc(sizeof(*pw), GFP_KERNEL); + if (!pw) + return -ENOMEM; + + dma_fence_work_init(&pw->base, &eb_parse_ops); + + pw->engine = eb->engine; + pw->batch = eb->batch; + pw->batch_offset = eb->batch_start_offset; + pw->batch_length = eb->batch_len; + pw->shadow = shadow; + pw->trampoline = trampoline; + + dma_resv_lock(pw->batch->resv, NULL); + + err = dma_resv_reserve_shared(pw->batch->resv, 1); + if (err) + goto err_batch_unlock; + + /* Wait for all writes (and relocs) into the batch to complete */ + err = i915_sw_fence_await_reservation(&pw->base.chain, + pw->batch->resv, NULL, false, + 0, I915_FENCE_GFP); + if (err < 0) + goto err_batch_unlock; + + /* Keep the batch alive and unwritten as we parse */ + dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma); + + dma_resv_unlock(pw->batch->resv); + + /* Force execution to wait for completion of the parser */ + dma_resv_lock(shadow->resv, NULL); + dma_resv_add_excl_fence(shadow->resv, &pw->base.dma); + dma_resv_unlock(shadow->resv); + + 
dma_fence_work_commit(&pw->base); + return 0; + +err_batch_unlock: + dma_resv_unlock(pw->batch->resv); + kfree(pw); + return err; +} + +static int eb_parse(struct i915_execbuffer *eb) +{ + struct intel_engine_pool_node *pool; + struct i915_vma *shadow, *trampoline; + unsigned int len; + int err; + + if (!eb_use_cmdparser(eb)) + return 0; + + len = eb->batch_len; + if (!CMDPARSER_USES_GGTT(eb->i915)) { + /* + * ppGTT backed shadow buffers must be mapped RO, to prevent + * post-scan tampering + */ + if (!eb->context->vm->has_read_only) { + DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n"); + return -EINVAL; + } + } else { + len += I915_CMD_PARSER_TRAMPOLINE_SIZE; + } + + pool = intel_engine_get_pool(eb->engine, len); if (IS_ERR(pool)) - return ERR_CAST(pool); - - err = intel_engine_cmd_parser(eb->engine, - eb->batch->obj, - pool->obj, - eb->batch_start_offset, - eb->batch_len, - is_master); - if (err) { - if (err == -EACCES) /* unhandled chained batch */ - vma = NULL; - else - vma = ERR_PTR(err); + return PTR_ERR(pool); + + shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER); + if (IS_ERR(shadow)) { + err = PTR_ERR(shadow); goto err; } + i915_gem_object_set_readonly(shadow->obj); - vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) - goto err; + trampoline = NULL; + if (CMDPARSER_USES_GGTT(eb->i915)) { + trampoline = shadow; + + shadow = shadow_batch_pin(pool->obj, + &eb->engine->gt->ggtt->vm, + PIN_GLOBAL); + if (IS_ERR(shadow)) { + err = PTR_ERR(shadow); + shadow = trampoline; + goto err_shadow; + } - eb->vma[eb->buffer_count] = i915_vma_get(vma); + eb->batch_flags |= I915_DISPATCH_SECURE; + } + + err = eb_parse_pipeline(eb, shadow, trampoline); + if (err) + goto err_trampoline; + + eb->vma[eb->buffer_count] = i915_vma_get(shadow); eb->flags[eb->buffer_count] = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; - vma->exec_flags = &eb->flags[eb->buffer_count]; + shadow->exec_flags = &eb->flags[eb->buffer_count]; eb->buffer_count++; - vma->private = pool; - return vma; + eb->trampoline = trampoline; + eb->batch_start_offset = 0; + eb->batch = shadow; + + shadow->private = pool; + return 0; +err_trampoline: + if (trampoline) + i915_vma_unpin(trampoline); +err_shadow: + i915_vma_unpin(shadow); err: intel_engine_pool_put(pool); - return vma; + return err; } static void @@ -2077,7 +2162,17 @@ static int eb_submit(struct i915_execbuffer *eb) if (err) return err; - if (i915_gem_context_nopreempt(eb->gem_context)) + if (eb->trampoline) { + GEM_BUG_ON(eb->batch_start_offset); + err = eb->engine->emit_bb_start(eb->request, + eb->trampoline->node.start + + eb->batch_len, + 0, 0); + if (err) + return err; + } + + if (intel_context_nopreempt(eb->context)) eb->request->flags |= I915_REQUEST_NOPREEMPT; return 0; @@ -2163,6 +2258,9 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) if (err) return err; + if (unlikely(intel_context_is_banned(ce))) + return -EIO; + /* * Pinning the contexts may generate requests in order to acquire * GGTT space, so do this first before we reserve a seqno for @@ -2429,6 +2527,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct drm_i915_gem_exec_object2 *exec, struct drm_syncobj **fences) { + struct drm_i915_private *i915 = to_i915(dev); struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct dma_fence *exec_fence = NULL; @@ -2440,7 +2539,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); - 
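eb_parse_pipeline() above is an instance of the driver's dma_fence_work pattern: initialise the work with an ops table, hook its embedded fence into the relevant reservation objects, then commit. A minimal hedged sketch of the same pattern in isolation; everything except the dma_fence_work helpers already used above is hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "i915_sw_fence_work.h"

struct zero_work {
	struct dma_fence_work base;
	void *dst;
	size_t len;
};

static int __zero_work(struct dma_fence_work *work)
{
	struct zero_work *zw = container_of(work, typeof(*zw), base);

	memset(zw->dst, 0, zw->len);	/* runs asynchronously */
	return 0;			/* non-zero is recorded as the fence error */
}

static const struct dma_fence_work_ops zero_ops = {
	.name = "zero",
	.work = __zero_work,
};

static struct dma_fence *queue_zero(void *dst, size_t len)
{
	struct zero_work *zw;

	zw = kzalloc(sizeof(*zw), GFP_KERNEL);
	if (!zw)
		return ERR_PTR(-ENOMEM);

	dma_fence_work_init(&zw->base, &zero_ops);
	zw->dst = dst;
	zw->len = len;

	/* Once committed, &zw->base.dma may be waited upon like any fence. */
	dma_fence_work_commit(&zw->base);
	return &zw->base.dma;
}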
eb.i915 = to_i915(dev); + eb.i915 = i915; eb.file = file; eb.args = args; if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) @@ -2457,11 +2556,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.buffer_count = args->buffer_count; eb.batch_start_offset = args->batch_start_offset; eb.batch_len = args->batch_len; + eb.trampoline = NULL; eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { + if (INTEL_GEN(i915) >= 11) + return -ENODEV; + + /* Return -EPERM to trigger fallback code on old binaries. */ + if (!HAS_SECURE_BATCHES(i915)) + return -EPERM; + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) - return -EPERM; + return -EPERM; eb.batch_flags |= I915_DISPATCH_SECURE; } @@ -2538,34 +2645,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if (eb_use_cmdparser(&eb)) { - struct i915_vma *vma; - - vma = eb_parse(&eb, drm_is_current_master(file)); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_vma; - } - - if (vma) { - /* - * Batch parsed and accepted: - * - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in - * the dispatch_execbuffer implementations. We - * specifically don't want that set on batches the - * command parser has accepted. - */ - eb.batch_flags |= I915_DISPATCH_SECURE; - eb.batch_start_offset = 0; - eb.batch = vma; - } - } - if (eb.batch_len == 0) eb.batch_len = eb.batch->size - eb.batch_start_offset; + err = eb_parse(&eb); + if (err) + goto err_vma; + /* * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. @@ -2644,6 +2730,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, err = eb_submit(&eb); err_request: add_to_client(eb.request, file); + i915_request_get(eb.request); i915_request_add(eb.request); if (fences) @@ -2659,6 +2746,7 @@ err_request: fput(out_fence->file); } } + i915_request_put(eb.request); err_batch_unpin: if (eb.batch_flags & I915_DISPATCH_SECURE) @@ -2668,6 +2756,8 @@ err_batch_unpin: err_vma: if (eb.exec) eb_release_vmas(&eb); + if (eb.trampoline) + i915_vma_unpin(eb.trampoline); mutex_unlock(&dev->struct_mutex); err_engine: eb_unpin_engine(&eb); @@ -2737,8 +2827,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, exec2.flags = I915_EXEC_RENDER; i915_execbuffer2_set_context_id(exec2, 0); - if (!i915_gem_check_execbuffer(&exec2)) - return -EINVAL; + err = i915_gem_check_execbuffer(&exec2); + if (err) + return err; /* Copy in the exec list from userland */ exec_list = kvmalloc_array(count, sizeof(*exec_list), @@ -2815,8 +2906,9 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (!i915_gem_check_execbuffer(args)) - return -EINVAL; + err = i915_gem_check_execbuffer(args); + if (err) + return err; /* Allocate an extra slot for use by the command parser */ exec2_list = kvmalloc_array(count + 1, eb_element_size(), diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 5ae694c24df4..9cfb0e41ff06 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -164,6 +164,7 @@ struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *i915, phys_addr_t size) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; @@ -178,7 +179,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); 
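The -EPERM above is chosen deliberately: old userspace treats it as "secure batches unavailable" and retries without the flag, while gen11+ rejects the flag outright with -ENODEV. A hedged, entirely hypothetical userspace sketch of that fallback contract:

#include <errno.h>
#include <stdbool.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical probe: attempt a secure dispatch once, falling back to an
 * unprivileged submission if the kernel refuses it. */
static bool exec_with_secure_fallback(int fd,
				      struct drm_i915_gem_execbuffer2 *eb)
{
	eb->flags |= I915_EXEC_SECURE;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb) == 0)
		return true;

	if (errno == EPERM || errno == ENODEV) {
		eb->flags &= ~I915_EXEC_SECURE;	/* retry without the flag */
		return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb) == 0;
	}

	return false;
}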
drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops); + i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class); /* * Mark the object as volatile, such that the pages are marked as diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h index ddc7f2a52b3e..87d8b27f426d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h @@ -28,8 +28,8 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); +int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); int i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c new file mode 100644 index 000000000000..520cc9cac471 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "intel_memory_region.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_lmem.h" +#include "i915_drv.h" + +const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { + .flags = I915_GEM_OBJECT_HAS_IOMEM, + + .get_pages = i915_gem_object_get_pages_buddy, + .put_pages = i915_gem_object_put_pages_buddy, + .release = i915_gem_object_release_memory_region, +}; + +/* XXX: Time to vfunc your life up? */ +void __iomem * +i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, + unsigned long n) +{ + resource_size_t offset; + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE); +} + +void __iomem * +i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, + unsigned long n) +{ + resource_size_t offset; + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset); +} + +void __iomem * +i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, + unsigned long n, + unsigned long size) +{ + resource_size_t offset; + + GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); +} + +bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) +{ + return obj->ops == &i915_gem_lmem_obj_ops; +} + +struct drm_i915_gem_object * +i915_gem_object_create_lmem(struct drm_i915_private *i915, + resource_size_t size, + unsigned int flags) +{ + return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM], + size, flags); +} + +struct drm_i915_gem_object * +__i915_gem_lmem_object_create(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) +{ + static struct lock_class_key lock_class; + struct drm_i915_private *i915 = mem->i915; + struct drm_i915_gem_object *obj; + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, 
&lock_class); + + obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT; + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); + + i915_gem_object_init_memory_region(obj, mem, flags); + + return obj; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h new file mode 100644 index 000000000000..7c176b8b7d2f --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_LMEM_H +#define __I915_GEM_LMEM_H + +#include <linux/types.h> + +struct drm_i915_private; +struct drm_i915_gem_object; +struct intel_memory_region; + +extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; + +void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, + unsigned long n, unsigned long size); +void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, + unsigned long n); +void __iomem * +i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, + unsigned long n); + +bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); + +struct drm_i915_gem_object * +i915_gem_object_create_lmem(struct drm_i915_private *i915, + resource_size_t size, + unsigned int flags); + +struct drm_i915_gem_object * +__i915_gem_lmem_object_create(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags); + +#endif /* !__I915_GEM_LMEM_H */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index fd4122d8c0a9..879fff8adc48 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -5,6 +5,7 @@ */ #include <linux/mman.h> +#include <linux/pfn_t.h> #include <linux/sizes.h> #include "gt/intel_gt.h" @@ -14,7 +15,9 @@ #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" +#include "i915_gem_mman.h" #include "i915_trace.h" +#include "i915_user_extensions.h" #include "i915_vma.h" static inline bool @@ -144,6 +147,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) * 3 - Remove implicit set-domain(GTT) and synchronisation on initial * pagefault; swapin remains transparent. * + * 4 - Support multiple fault handlers per object depending on object's + * backing storage (a.k.a. MMAP_OFFSET). + * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine @@ -171,7 +177,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) */ int i915_gem_mmap_gtt_version(void) { - return 3; + return 4; } static inline struct i915_ggtt_view @@ -197,29 +203,83 @@ compute_partial_view(const struct drm_i915_gem_object *obj, return view; } -/** - * i915_gem_fault - fault a page into the GTT - * @vmf: fault info - * - * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped - * from userspace. The fault handler takes care of binding the object to - * the GTT (if needed), allocating and programming a fence register (again, - * only if needed based on whether the old reg is still valid or the object - * is tiled) and inserting a new PTE into the faulting process. - * - * Note that the faulting process may involve evicting existing objects - * from the GTT and/or fence registers to make room. So performance may - * suffer if the GTT working set is large or there are few fence registers - * left. 
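The lmem io_map helpers above return write-combined kernel mappings into the memory region's iomap. A hedged sketch of a caller, assuming the object is pinned with its backing store already allocated; the function and its context are hypothetical:

#include <linux/io.h>
#include <linux/io-mapping.h>
#include "gem/i915_gem_lmem.h"

/* Hypothetical caller: zero one page of an lmem object through its WC
 * mapping. Only the helpers above and the generic io-mapping API are
 * assumed. */
static void lmem_zero_page(struct drm_i915_gem_object *obj, unsigned long n)
{
	void __iomem *vaddr;

	vaddr = i915_gem_object_lmem_io_map_page(obj, n);
	memset_io(vaddr, 0, PAGE_SIZE);
	io_mapping_unmap(vaddr);	/* pairs with io_mapping_map_wc() */
}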
- * - * The current feature set supported by i915_gem_fault() and thus GTT mmaps - * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). - */ -vm_fault_t i915_gem_fault(struct vm_fault *vmf) +static vm_fault_t i915_error_to_vmf_fault(int err) +{ + switch (err) { + default: + WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); + /* fallthrough */ + case -EIO: /* shmemfs failure from swap device */ + case -EFAULT: /* purged object */ + case -ENODEV: /* bad object, how did you get here! */ + return VM_FAULT_SIGBUS; + + case -ENOSPC: /* shmemfs allocation failure */ + case -ENOMEM: /* our allocation failure */ + return VM_FAULT_OOM; + + case 0: + case -EAGAIN: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ + return VM_FAULT_NOPAGE; + } +} + +static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) +{ + struct vm_area_struct *area = vmf->vma; + struct i915_mmap_offset *mmo = area->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; + unsigned long i, size = area->vm_end - area->vm_start; + bool write = area->vm_flags & VM_WRITE; + vm_fault_t ret = VM_FAULT_SIGBUS; + int err; + + if (!i915_gem_object_has_struct_page(obj)) + return ret; + + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return ret; + + err = i915_gem_object_pin_pages(obj); + if (err) + return i915_error_to_vmf_fault(err); + + /* PTEs are revoked in obj->ops->put_pages() */ + for (i = 0; i < size >> PAGE_SHIFT; i++) { + struct page *page = i915_gem_object_get_page(obj, i); + + ret = vmf_insert_pfn(area, + (unsigned long)area->vm_start + i * PAGE_SIZE, + page_to_pfn(page)); + if (ret != VM_FAULT_NOPAGE) + break; + } + + if (write) { + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + obj->cache_dirty = true; /* XXX flush after PAT update? */ + obj->mm.dirty = true; + } + + i915_gem_object_unpin_pages(obj); + + return ret; +} + +static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) { #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) struct vm_area_struct *area = vmf->vma; - struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); + struct i915_mmap_offset *mmo = area->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; struct drm_device *dev = obj->base.dev; struct drm_i915_private *i915 = to_i915(dev); struct intel_runtime_pm *rpm = &i915->runtime_pm; @@ -312,7 +372,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) list_add(&obj->userfault_link, &i915->ggtt.userfault_list); mutex_unlock(&i915->ggtt.vm.mutex); - if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + /* Track the mmo associated with the fenced vma */ + vma->mmo = mmo; + + if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); @@ -332,67 +395,36 @@ err_rpm: intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); err: - switch (ret) { - default: - WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); - /* fallthrough */ - case -EIO: /* shmemfs failure from swap device */ - case -EFAULT: /* purged object */ - case -ENODEV: /* bad object, how did you get here! 
*/ - return VM_FAULT_SIGBUS; - - case -ENOSPC: /* shmemfs allocation failure */ - case -ENOMEM: /* our allocation failure */ - return VM_FAULT_OOM; - - case 0: - case -EAGAIN: - case -ERESTARTSYS: - case -EINTR: - case -EBUSY: - /* - * EBUSY is ok: this just means that another thread - * already did the job. - */ - return VM_FAULT_NOPAGE; - } + return i915_error_to_vmf_fault(ret); } -void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct i915_vma *vma; GEM_BUG_ON(!obj->userfault_count); - obj->userfault_count = 0; - list_del(&obj->userfault_link); - drm_vma_node_unmap(&obj->base.vma_node, - obj->base.dev->anon_inode->i_mapping); - for_each_ggtt_vma(vma, obj) - i915_vma_unset_userfault(vma); + i915_vma_revoke_mmap(vma); + + GEM_BUG_ON(obj->userfault_count); } -/** - * i915_gem_object_release_mmap - remove physical page mappings - * @obj: obj in question - * - * Preserve the reservation of the mmapping with the DRM core code, but - * relinquish ownership of the pages back to the system. - * +/* * It is vital that we remove the page mapping if we have mapped a tiled * object through the GTT and then lose the fence register due to * resource pressure. Similarly if the object has been moved out of the * aperture, then pages mapped into userspace must be revoked. Removing the * mapping will then trigger a page fault on the next user access, allowing - * fixup by i915_gem_fault(). + * fixup by vm_fault_gtt(). */ -void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); intel_wakeref_t wakeref; - /* Serialisation between user GTT access and our code depends upon + /* + * Serialisation between user GTT access and our code depends upon * revoking the CPU's PTE whilst the mutex is held. The next user * pagefault then has to wait until we release the mutex. * @@ -406,9 +438,10 @@ if (!obj->userfault_count) goto out; - __i915_gem_object_release_mmap(obj); + __i915_gem_object_release_mmap_gtt(obj); - /* Ensure that the CPU's PTE are revoked and there are not outstanding + /* + * Ensure that the CPU's PTEs are revoked and there are no outstanding * memory transactions from userspace before we return. The TLB * flushing implied above by changing the PTE above *should* be * sufficient, an extra barrier here just provides us with a bit @@ -422,54 +455,149 @@ out: intel_runtime_pm_put(&i915->runtime_pm, wakeref); } -static int create_mmap_offset(struct drm_i915_gem_object *obj) +void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct i915_mmap_offset *mmo; + + spin_lock(&obj->mmo.lock); + list_for_each_entry(mmo, &obj->mmo.offsets, offset) { + /* + * vma_node_unmap for GTT mmaps handled already in + * __i915_gem_object_release_mmap_gtt + */ + if (mmo->mmap_type == I915_MMAP_TYPE_GTT) + continue; + + spin_unlock(&obj->mmo.lock); + drm_vma_node_unmap(&mmo->vma_node, + obj->base.dev->anon_inode->i_mapping); + spin_lock(&obj->mmo.lock); + } + spin_unlock(&obj->mmo.lock); +} + +/** + * i915_gem_object_release_mmap - remove physical page mappings + * @obj: obj in question + * + * Preserve the reservation of the mmapping with the DRM core code, but + * relinquish ownership of the pages back to the system.
+ */ +void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +{ + i915_gem_object_release_mmap_gtt(obj); + i915_gem_object_release_mmap_offset(obj); +} + +static struct i915_mmap_offset * +mmap_offset_attach(struct drm_i915_gem_object *obj, + enum i915_mmap_type mmap_type, + struct drm_file *file) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_gt *gt = &i915->gt; + struct i915_mmap_offset *mmo; int err; - err = drm_gem_create_mmap_offset(&obj->base); + mmo = kmalloc(sizeof(*mmo), GFP_KERNEL); + if (!mmo) + return ERR_PTR(-ENOMEM); + + mmo->obj = obj; + mmo->dev = obj->base.dev; + mmo->file = file; + mmo->mmap_type = mmap_type; + drm_vma_node_reset(&mmo->vma_node); + + err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node, + obj->base.size / PAGE_SIZE); if (likely(!err)) - return 0; + goto out; /* Attempt to reap some mmap space from dead objects */ - err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT); + err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT); if (err) - return err; + goto err; i915_gem_drain_freed_objects(i915); - return drm_gem_create_mmap_offset(&obj->base); + err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node, + obj->base.size / PAGE_SIZE); + if (err) + goto err; + +out: + if (file) + drm_vma_node_allow(&mmo->vma_node, file); + + spin_lock(&obj->mmo.lock); + list_add(&mmo->offset, &obj->mmo.offsets); + spin_unlock(&obj->mmo.lock); + + return mmo; + +err: + kfree(mmo); + return ERR_PTR(err); } -int -i915_gem_mmap_gtt(struct drm_file *file, - struct drm_device *dev, - u32 handle, - u64 *offset) +static int +__assign_mmap_offset(struct drm_file *file, + u32 handle, + enum i915_mmap_type mmap_type, + u64 *offset) { struct drm_i915_gem_object *obj; - int ret; + struct i915_mmap_offset *mmo; + int err; obj = i915_gem_object_lookup(file, handle); if (!obj) return -ENOENT; - if (i915_gem_object_never_bind_ggtt(obj)) { - ret = -ENODEV; + if (mmap_type == I915_MMAP_TYPE_GTT && + i915_gem_object_never_bind_ggtt(obj)) { + err = -ENODEV; + goto out; + } + + if (mmap_type != I915_MMAP_TYPE_GTT && + !i915_gem_object_has_struct_page(obj)) { + err = -ENODEV; goto out; } - ret = create_mmap_offset(obj); - if (ret == 0) - *offset = drm_vma_node_offset_addr(&obj->base.vma_node); + mmo = mmap_offset_attach(obj, mmap_type, file); + if (IS_ERR(mmo)) { + err = PTR_ERR(mmo); + goto out; + } + *offset = drm_vma_node_offset_addr(&mmo->vma_node); + err = 0; out: i915_gem_object_put(obj); - return ret; + return err; +} + +int +i915_gem_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, + u64 *offset) +{ + enum i915_mmap_type mmap_type; + + if (boot_cpu_has(X86_FEATURE_PAT)) + mmap_type = I915_MMAP_TYPE_WC; + else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt)) + return -ENODEV; + else + mmap_type = I915_MMAP_TYPE_GTT; + + return __assign_mmap_offset(file, handle, mmap_type, offset); } /** - * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing + * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device * @data: GTT mapping ioctl data * @file: GEM object info @@ -484,12 +612,179 @@ out: * userspace. 
*/ int -i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) { - struct drm_i915_gem_mmap_gtt *args = data; + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_mmap_offset *args = data; + enum i915_mmap_type type; + int err; - return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); + /* + * Historically we failed to check args.pad and args.offset + * and so we cannot use those fields for user input and we cannot + * add -EINVAL for them as the ABI is fixed, i.e. old userspace + * may be feeding in garbage in those fields. + * + * if (args->pad) return -EINVAL; is verboten! + */ + + err = i915_user_extensions(u64_to_user_ptr(args->extensions), + NULL, 0, NULL); + if (err) + return err; + + switch (args->flags) { + case I915_MMAP_OFFSET_GTT: + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return -ENODEV; + type = I915_MMAP_TYPE_GTT; + break; + + case I915_MMAP_OFFSET_WC: + if (!boot_cpu_has(X86_FEATURE_PAT)) + return -ENODEV; + type = I915_MMAP_TYPE_WC; + break; + + case I915_MMAP_OFFSET_WB: + type = I915_MMAP_TYPE_WB; + break; + + case I915_MMAP_OFFSET_UC: + if (!boot_cpu_has(X86_FEATURE_PAT)) + return -ENODEV; + type = I915_MMAP_TYPE_UC; + break; + + default: + return -EINVAL; + } + + return __assign_mmap_offset(file, args->handle, type, &args->offset); +} + +static void vm_open(struct vm_area_struct *vma) +{ + struct i915_mmap_offset *mmo = vma->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; + + GEM_BUG_ON(!obj); + i915_gem_object_get(obj); +} + +static void vm_close(struct vm_area_struct *vma) +{ + struct i915_mmap_offset *mmo = vma->vm_private_data; + struct drm_i915_gem_object *obj = mmo->obj; + + GEM_BUG_ON(!obj); + i915_gem_object_put(obj); +} + +static const struct vm_operations_struct vm_ops_gtt = { + .fault = vm_fault_gtt, + .open = vm_open, + .close = vm_close, +}; + +static const struct vm_operations_struct vm_ops_cpu = { + .fault = vm_fault_cpu, + .open = vm_open, + .close = vm_close, +}; + +/* + * This overcomes the limitation in drm_gem_mmap's assignment of a + * drm_gem_object as the vma->vm_private_data, since we need to + * be able to resolve multiple mmap offsets which could be tied + * to a single gem object. + */ +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_vma_offset_node *node; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->minor->dev; + struct i915_mmap_offset *mmo = NULL; + struct drm_gem_object *obj = NULL; + + if (drm_dev_is_unplugged(dev)) + return -ENODEV; + + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, + vma_pages(vma)); + if (likely(node)) { + mmo = container_of(node, struct i915_mmap_offset, + vma_node); + /* + * In our dependency chain, the drm_vma_offset_node + * depends on the validity of the mmo, which depends on + * the gem object. However, the only reference we have + * at this point is the mmo (as the parent of the node). + * Try to check if the gem object was at least cleared. + */ + if (!mmo || !mmo->obj) { + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + return -EINVAL; + } + /* + * Skip 0-refcnted objects, as they are in the process of being + * destroyed and will be invalid when the vma manager lock + * is released.
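From userspace, the ioctl above plus i915_gem_mmap() replace the old single GTT offset per object with one offset per mapping type. A hedged end-to-end sketch; the wrapper is hypothetical, while the ioctl, struct and flag are the uAPI introduced by this change:

#include <stddef.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: obtain a write-combined CPU mapping of a GEM
 * object via the MMAP_OFFSET flow. Error handling is illustrative; a
 * real caller must also check for MAP_FAILED from mmap(). */
static void *gem_mmap_wc(int fd, __u32 handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,	/* requires PAT, see above */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return NULL;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}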
+ */ + obj = &mmo->obj->base; + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + if (!obj) + return -EINVAL; + + if (!drm_vma_node_is_allowed(node, priv)) { + drm_gem_object_put_unlocked(obj); + return -EACCES; + } + + if (i915_gem_object_is_readonly(to_intel_bo(obj))) { + if (vma->vm_flags & VM_WRITE) { + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + vma->vm_flags &= ~VM_MAYWRITE; + } + + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_private_data = mmo; + + switch (mmo->mmap_type) { + case I915_MMAP_TYPE_WC: + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = &vm_ops_cpu; + break; + + case I915_MMAP_TYPE_WB: + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &vm_ops_cpu; + break; + + case I915_MMAP_TYPE_UC: + vma->vm_page_prot = + pgprot_noncached(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = &vm_ops_cpu; + break; + + case I915_MMAP_TYPE_GTT: + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_ops = &vm_ops_gtt; + break; + } + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + + return 0; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h new file mode 100644 index 000000000000..862e01b7cb69 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_MMAN_H__ +#define __I915_GEM_MMAN_H__ + +#include <linux/mm_types.h> +#include <linux/types.h> + +struct drm_device; +struct drm_file; +struct drm_i915_gem_object; +struct file; +struct i915_mmap_offset; +struct mutex; + +int i915_gem_mmap_gtt_version(void); +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +int i915_gem_dumb_mmap_offset(struct drm_file *file_priv, + struct drm_device *dev, + u32 handle, u64 *offset); + +void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); +void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); +void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index dbf9be9a79f4..46bacc82ddc4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -22,11 +22,14 @@ * */ +#include <linux/sched/mm.h> + #include "display/intel_frontbuffer.h" #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" +#include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_globals.h" #include "i915_trace.h" @@ -47,9 +50,10 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) } void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops) + const struct drm_i915_gem_object_ops *ops, + struct lock_class_key *key) { - mutex_init(&obj->mm.lock); + __mutex_init(&obj->mm.lock, "obj->mm.lock", key); spin_lock_init(&obj->vma.lock); INIT_LIST_HEAD(&obj->vma.list); @@ -58,6 +62,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->lut_list); + spin_lock_init(&obj->mmo.lock); + INIT_LIST_HEAD(&obj->mmo.offsets); + init_rcu_head(&obj->rcu); obj->ops = ops; @@ -94,6 +101,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) struct 
drm_i915_gem_object *obj = to_intel_bo(gem); struct drm_i915_file_private *fpriv = file->driver_priv; struct i915_lut_handle *lut, *ln; + struct i915_mmap_offset *mmo; LIST_HEAD(close); i915_gem_object_lock(obj); @@ -108,6 +116,17 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) } i915_gem_object_unlock(obj); + spin_lock(&obj->mmo.lock); + list_for_each_entry(mmo, &obj->mmo.offsets, offset) { + if (mmo->file != file) + continue; + + spin_unlock(&obj->mmo.lock); + drm_vma_node_revoke(&mmo->vma_node, file); + spin_lock(&obj->mmo.lock); + } + spin_unlock(&obj->mmo.lock); + list_for_each_entry_safe(lut, ln, &close, obj_link) { struct i915_gem_context *ctx = lut->ctx; struct i915_vma *vma; @@ -155,6 +174,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, wakeref = intel_runtime_pm_get(&i915->runtime_pm); llist_for_each_entry_safe(obj, on, freed, freed) { + struct i915_mmap_offset *mmo, *mn; + trace_i915_gem_object_destroy(obj); if (!list_empty(&obj->vma.list)) { @@ -173,19 +194,28 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(vma->obj != obj); spin_unlock(&obj->vma.lock); - i915_vma_destroy(vma); + __i915_vma_put(vma); spin_lock(&obj->vma.lock); } spin_unlock(&obj->vma.lock); } + i915_gem_object_release_mmap(obj); + + list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) { + drm_vma_offset_remove(obj->base.dev->vma_offset_manager, + &mmo->vma_node); + kfree(mmo); + } + INIT_LIST_HEAD(&obj->mmo.offsets); + GEM_BUG_ON(atomic_read(&obj->bind_count)); GEM_BUG_ON(obj->userfault_count); GEM_BUG_ON(!list_empty(&obj->lut_list)); atomic_set(&obj->mm.pages_pin_count, 0); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); GEM_BUG_ON(i915_gem_object_has_pages(obj)); bitmap_free(obj->bit_17); @@ -276,18 +306,14 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: - for_each_ggtt_vma(vma, obj) - intel_gt_flush_ggtt_writes(vma->vm->gt); - - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); - + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { - if (vma->iomap) - continue; - - i915_vma_unset_ggtt_write(vma); + if (i915_vma_unset_ggtt_write(vma)) + intel_gt_flush_ggtt_writes(vma->vm->gt); } + spin_unlock(&obj->vma.lock); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); break; case I915_GEM_DOMAIN_WC: @@ -307,6 +333,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, obj->write_domain = 0; } +void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + struct intel_frontbuffer *front; + + front = __intel_frontbuffer_get(obj); + if (front) { + intel_frontbuffer_flush(front, origin); + intel_frontbuffer_put(front); + } +} + +void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + struct intel_frontbuffer *front; + + front = __intel_frontbuffer_get(obj); + if (front) { + intel_frontbuffer_invalidate(front, origin); + intel_frontbuffer_put(front); + } +} + void i915_gem_init__objects(struct drm_i915_private *i915) { INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 85921796851f..858f8bf49a04 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -13,8 +13,8 @@ #include <drm/i915_drm.h> +#include 
"display/intel_frontbuffer.h" #include "i915_gem_object_types.h" - #include "i915_gem_gtt.h" void i915_gem_init__objects(struct drm_i915_private *i915); @@ -23,7 +23,8 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops); + const struct drm_i915_gem_object_ops *ops, + struct lock_class_key *key); struct drm_i915_gem_object * i915_gem_object_create_shmem(struct drm_i915_private *i915, resource_size_t size); @@ -131,13 +132,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj, static inline void i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) { - obj->base.vma_node.readonly = true; + obj->flags |= I915_BO_READONLY; } static inline bool i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) { - return obj->base.vma_node.readonly; + return obj->flags & I915_BO_READONLY; } static inline bool @@ -270,10 +271,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ + I915_MM_NORMAL = 0, + /* + * Only used by struct_mutex, when called "recursively" from + * direct-reclaim-esque. Safe because there is only every one + * struct_mutex in the entire system. + */ + I915_MM_SHRINKER = 1, + /* + * Used for obj->mm.lock when allocating pages. Safe because the object + * isn't yet on any LRU, and therefore the shrinker can't deadlock on + * it. As soon as the object has pages, obj->mm.lock nests within + * fs_reclaim. + */ + I915_MM_GET_PAGES = 1, +}; + static inline int __must_check i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) { - might_lock(&obj->mm.lock); + might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES); if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) return 0; @@ -316,13 +334,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) __i915_gem_object_unpin_pages(obj); } -enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ - I915_MM_NORMAL = 0, - I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */ -}; - -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass); +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_object_truncate(struct drm_i915_gem_object *obj); void i915_gem_object_writeback(struct drm_i915_gem_object *obj); @@ -375,9 +387,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } -void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); -void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); - void i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains); @@ -461,6 +470,26 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj, int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); -#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) + +void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin); +void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin); + +static inline void 
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + if (unlikely(rcu_access_pointer(obj->frontbuffer))) + __i915_gem_object_flush_frontbuffer(obj, origin); +} + +static inline void +i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + if (unlikely(rcu_access_pointer(obj->frontbuffer))) + __i915_gem_object_invalidate_frontbuffer(obj, origin); +} #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index 5bd8de124d74..70809d8897cd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -8,6 +8,7 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_engine_pool.h" #include "gt/intel_gt.h" +#include "gt/intel_ring.h" #include "i915_gem_clflush.h" #include "i915_gem_object_blt.h" @@ -16,7 +17,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, u32 value) { struct drm_i915_private *i915 = ce->vm->i915; - const u32 block_size = S16_MAX * PAGE_SIZE; + const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */ struct intel_engine_pool_node *pool; struct i915_vma *batch; u64 offset; @@ -29,7 +30,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, GEM_BUG_ON(intel_engine_is_virtual(ce->engine)); intel_engine_pm_get(ce->engine); - count = div_u64(vma->size, block_size); + count = div_u64(round_up(vma->size, block_size), block_size); size = (1 + 8 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); pool = intel_engine_get_pool(ce->engine, size); @@ -200,7 +201,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, struct i915_vma *dst) { struct drm_i915_private *i915 = ce->vm->i915; - const u32 block_size = S16_MAX * PAGE_SIZE; + const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */ struct intel_engine_pool_node *pool; struct i915_vma *batch; u64 src_offset, dst_offset; @@ -213,7 +214,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, GEM_BUG_ON(intel_engine_is_virtual(ce->engine)); intel_engine_pm_get(ce->engine); - count = div_u64(dst->size, block_size); + count = div_u64(round_up(dst->size, block_size), block_size); size = (1 + 11 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); pool = intel_engine_get_pool(ce->engine, size); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index a387e3ee728b..88e268633fdc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -31,10 +31,11 @@ struct i915_lut_handle { struct drm_i915_gem_object_ops { unsigned int flags; #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0) -#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) -#define I915_GEM_OBJECT_IS_PROXY BIT(2) -#define I915_GEM_OBJECT_NO_GGTT BIT(3) -#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4) +#define I915_GEM_OBJECT_HAS_IOMEM BIT(1) +#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2) +#define I915_GEM_OBJECT_IS_PROXY BIT(3) +#define I915_GEM_OBJECT_NO_GGTT BIT(4) +#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5) /* Interface between the GEM object and its backing storage. 
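 * Each backing store (shmem, stolen, userptr, device local memory) supplies its own ops table; the new I915_GEM_OBJECT_HAS_IOMEM flag marks stores reached through I/O memory rather than struct page. *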
* get_pages() is called once prior to the use of the associated set @@ -62,6 +63,23 @@ struct drm_i915_gem_object_ops { void (*release)(struct drm_i915_gem_object *obj); }; +enum i915_mmap_type { + I915_MMAP_TYPE_GTT = 0, + I915_MMAP_TYPE_WC, + I915_MMAP_TYPE_WB, + I915_MMAP_TYPE_UC, +}; + +struct i915_mmap_offset { + struct drm_device *dev; + struct drm_vma_offset_node vma_node; + struct drm_i915_gem_object *obj; + struct drm_file *file; + enum i915_mmap_type mmap_type; + + struct list_head offset; +}; + struct drm_i915_gem_object { struct drm_gem_object base; @@ -117,12 +135,18 @@ struct drm_i915_gem_object { unsigned int userfault_count; struct list_head userfault_link; + struct { + spinlock_t lock; /* Protects access to mmo offsets */ + struct list_head offsets; + } mmo; + I915_SELFTEST_DECLARE(struct list_head st_link); unsigned long flags; #define I915_BO_ALLOC_CONTIGUOUS BIT(0) #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) +#define I915_BO_READONLY BIT(2) /* * Is the object to be mapped as read-only to the GPU @@ -149,7 +173,7 @@ struct drm_i915_gem_object { */ u16 write_domain; - struct intel_frontbuffer *frontbuffer; + struct intel_frontbuffer __rcu *frontbuffer; /** Current tiling stride for the object, if it's tiled. */ unsigned int tiling_and_stride; @@ -161,7 +185,11 @@ struct drm_i915_gem_object { atomic_t bind_count; struct { - struct mutex lock; /* protects the pages and their use */ + /* + * Protects the pages and their use. Do not use directly, but + * instead go through the pin/unpin interfaces. + */ + struct mutex lock; atomic_t pages_pin_count; atomic_t shrink_pin; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index b0ec0959c13f..75197ca696a8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -7,6 +7,8 @@ #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" +#include "i915_gem_lmem.h" +#include "i915_gem_mman.h" void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, @@ -105,7 +107,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { int err; - err = mutex_lock_interruptible(&obj->mm.lock); + err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES); if (err) return err; @@ -154,6 +156,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) rcu_read_unlock(); } +static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) +{ + if (i915_gem_object_is_lmem(obj)) + io_mapping_unmap((void __force __iomem *)ptr); + else if (is_vmalloc_addr(ptr)) + vunmap(ptr); + else + kunmap(kmap_to_page(ptr)); +} + struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) { @@ -169,14 +181,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { - void *ptr; - - ptr = page_mask_bits(obj->mm.mapping); - if (is_vmalloc_addr(ptr)) - vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); - + unmap_object(obj, page_mask_bits(obj->mm.mapping)); obj->mm.mapping = NULL; } @@ -186,8 +191,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) return pages; } -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass) +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) { struct sg_table *pages; int err; @@ -198,12 +202,14 @@ int 
__i915_gem_object_put_pages(struct drm_i915_gem_object *obj, GEM_BUG_ON(atomic_read(&obj->bind_count)); /* May be called by shrinker from within get_pages() (on another bo) */ - mutex_lock_nested(&obj->mm.lock, subclass); + mutex_lock(&obj->mm.lock); if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { err = -EBUSY; goto unlock; } + i915_gem_object_release_mmap_offset(obj); + /* * ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt @@ -231,7 +237,7 @@ unlock: } /* The 'mapping' part of i915_gem_object_pin_map() below */ -static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, +static void *i915_gem_object_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT; @@ -244,6 +250,16 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, pgprot_t pgprot; void *addr; + if (i915_gem_object_is_lmem(obj)) { + void __iomem *io; + + if (type != I915_MAP_WC) + return NULL; + + io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size); + return (void __force *)io; + } + /* A single page can always be kmapped */ if (n_pages == 1 && type == I915_MAP_WB) return kmap(sg_page(sgt->sgl)); @@ -285,14 +301,16 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { enum i915_map_type has_type; + unsigned int flags; bool pinned; void *ptr; int err; - if (unlikely(!i915_gem_object_has_struct_page(obj))) + flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM; + if (!i915_gem_object_type_has(obj, flags)) return ERR_PTR(-ENXIO); - err = mutex_lock_interruptible(&obj->mm.lock); + err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES); if (err) return ERR_PTR(err); @@ -321,10 +339,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, goto err_unpin; } - if (is_vmalloc_addr(ptr)) - vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); + unmap_object(obj, ptr); ptr = obj->mm.mapping = NULL; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 8043ff63d73f..b1b7c1b3038a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -164,7 +164,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) if (err) return err; - mutex_lock(&obj->mm.lock); + mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES); if (obj->mm.madv != I915_MADV_WILLNEED) { err = -EFAULT; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 7987b54fb1f5..c8264eb036bf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -11,83 +11,13 @@ #include "i915_drv.h" -static int pm_notifier(struct notifier_block *nb, - unsigned long action, - void *data) -{ - struct drm_i915_private *i915 = - container_of(nb, typeof(*i915), gem.pm_notifier); - - switch (action) { - case INTEL_GT_UNPARK: - break; - - case INTEL_GT_PARK: - i915_vma_parked(i915); - break; - } - - return NOTIFY_OK; -} - -static bool switch_to_kernel_context_sync(struct intel_gt *gt) -{ - bool result = !intel_gt_is_wedged(gt); - - if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { - /* XXX hide warning from gem_eio */ - if (i915_modparams.reset) { - dev_err(gt->i915->drm.dev, - "Failed to idle engines, declaring wedged!\n"); - GEM_TRACE_DUMP(); - } - - /* - * Forcibly cancel outstanding work and leave - * the gpu quiet. 
- */ - intel_gt_set_wedged(gt); - result = false; - } - - if (intel_gt_pm_wait_for_idle(gt)) - result = false; - - return result; -} - -bool i915_gem_load_power_context(struct drm_i915_private *i915) -{ - return switch_to_kernel_context_sync(&i915->gt); -} - -static void user_forcewake(struct intel_gt *gt, bool suspend) -{ - int count = atomic_read(&gt->user_wakeref); - - /* Inside suspend/resume so single threaded, no races to worry about. */ - if (likely(!count)) - return; - - intel_gt_pm_get(gt); - if (suspend) { - GEM_BUG_ON(count > atomic_read(&gt->wakeref.count)); - atomic_sub(count, &gt->wakeref.count); - } else { - atomic_add(count, &gt->wakeref.count); - } - intel_gt_pm_put(gt); -} - void i915_gem_suspend(struct drm_i915_private *i915) { - GEM_TRACE("\n"); + GEM_TRACE("%s\n", dev_name(i915->drm.dev)); intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0); flush_workqueue(i915->wq); - user_forcewake(&i915->gt, true); - /* * We have to flush all the executing contexts to main memory so * that they can be saved in the hibernation image. To ensure the last @@ -97,10 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. */ - intel_gt_suspend(&i915->gt); - intel_uc_suspend(&i915->gt.uc); - - cancel_delayed_work_sync(&i915->gt.hangcheck.work); + intel_gt_suspend_prepare(&i915->gt); i915_gem_drain_freed_objects(i915); } @@ -142,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. */ + intel_gt_suspend_late(&i915->gt); + spin_lock_irqsave(&i915->mm.obj_lock, flags); for (phase = phases; *phase; phase++) { LIST_HEAD(keep); @@ -166,51 +95,16 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) list_splice_tail(&keep, *phase); } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - - i915_gem_sanitize(i915); } void i915_gem_resume(struct drm_i915_private *i915) { - GEM_TRACE("\n"); - - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - - if (intel_gt_init_hw(&i915->gt)) - goto err_wedged; + GEM_TRACE("%s\n", dev_name(i915->drm.dev)); /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. So let's just reset * it and start again.
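 * (The single intel_gt_resume() call below now owns that dance: the reset and the reload of the kernel context happen in the GT code rather than here in GEM.)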
*/ - if (!i915_gem_load_power_context(i915)) - goto err_wedged; - - user_forcewake(&i915->gt, false); - -out_unlock: - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - return; - -err_wedged: - if (!intel_gt_is_wedged(&i915->gt)) { - dev_err(i915->drm.dev, - "Failed to re-initialize GPU, declaring it wedged!\n"); - intel_gt_set_wedged(&i915->gt); - } - goto out_unlock; -} - -void i915_gem_init__pm(struct drm_i915_private *i915) -{ - i915->gem.pm_notifier.notifier_call = pm_notifier; - blocking_notifier_chain_register(&i915->gt.pm_notifications, - &i915->gem.pm_notifier); + intel_gt_resume(&i915->gt); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h index 6f7d5d11ac3b..26b78dbdc225 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h @@ -12,9 +12,6 @@ struct drm_i915_private; struct work_struct; -void i915_gem_init__pm(struct drm_i915_private *i915); - -bool i915_gem_load_power_context(struct drm_i915_private *i915); void i915_gem_resume(struct drm_i915_private *i915); void i915_gem_idle_work_handler(struct work_struct *work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index 2f7bcfb9c964..d50adac12249 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -85,7 +85,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) } prev_end = offset + block_size; - }; + } sg_page_sizes |= sg->length; sg_mark_end(sg); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index be68b76e13b3..4d69c3fc3439 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -465,6 +465,7 @@ create_shmem(struct intel_memory_region *mem, resource_size_t size, unsigned int flags) { + static struct lock_class_key lock_class; struct drm_i915_private *i915 = mem->i915; struct drm_i915_gem_object *obj; struct address_space *mapping; @@ -491,7 +492,7 @@ create_shmem(struct intel_memory_region *mem, mapping_set_gfp_mask(mapping, mask); GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); - i915_gem_object_init(obj, &i915_gem_shmem_ops); + i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index fd3ce6da8497..f7e4b39c734f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -57,7 +57,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, flags = I915_GEM_OBJECT_UNBIND_ACTIVE; if (i915_gem_object_unbind(obj, flags) == 0) - __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + __i915_gem_object_put_pages(obj); return !i915_gem_object_has_pages(obj); } @@ -209,8 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915, if (unsafe_drop_pages(obj, shrink)) { /* May arrive from get_pages on another bo */ - mutex_lock_nested(&obj->mm.lock, - I915_MM_SHRINKER); + mutex_lock(&obj->mm.lock); if (!i915_gem_object_has_pages(obj)) { try_to_writeback(obj, shrink); count += obj->base.size >> PAGE_SHIFT; @@ -437,12 +436,12 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, fs_reclaim_acquire(GFP_KERNEL); mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); - mutex_release(&mutex->dep_map, 0, _RET_IP_); + mutex_release(&mutex->dep_map, _RET_IP_); 
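 /* (The mutex_acquire/mutex_release pair above never takes the mutex for real; it records, while inside fs_reclaim, the ordering between this mutex and reclaim so that lockdep can report inversions later.) */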
fs_reclaim_release(GFP_KERNEL); if (unlock) - mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); + mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_); } #define obj_to_i915(obj__) to_i915((obj__)->base.dev) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 57cd8bc2657c..afb08a1704a2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -26,48 +26,49 @@ * for is a boon. */ -int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, +int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment, u64 start, u64 end) { int ret; - if (!drm_mm_initialized(&dev_priv->mm.stolen)) + if (!drm_mm_initialized(&i915->mm.stolen)) return -ENODEV; /* WaSkipStolenMemoryFirstPage:bdw+ */ - if (INTEL_GEN(dev_priv) >= 8 && start < 4096) + if (INTEL_GEN(i915) >= 8 && start < 4096) start = 4096; - mutex_lock(&dev_priv->mm.stolen_lock); - ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, + mutex_lock(&i915->mm.stolen_lock); + ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, size, alignment, 0, start, end, DRM_MM_INSERT_BEST); - mutex_unlock(&dev_priv->mm.stolen_lock); + mutex_unlock(&i915->mm.stolen_lock); return ret; } -int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, +int i915_gem_stolen_insert_node(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment) { - return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, + return i915_gem_stolen_insert_node_in_range(i915, node, size, alignment, 0, U64_MAX); } -void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, +void i915_gem_stolen_remove_node(struct drm_i915_private *i915, struct drm_mm_node *node) { - mutex_lock(&dev_priv->mm.stolen_lock); + mutex_lock(&i915->mm.stolen_lock); drm_mm_remove_node(node); - mutex_unlock(&dev_priv->mm.stolen_lock); + mutex_unlock(&i915->mm.stolen_lock); } -static int i915_adjust_stolen(struct drm_i915_private *dev_priv, +static int i915_adjust_stolen(struct drm_i915_private *i915, struct resource *dsm) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_uncore *uncore = ggtt->vm.gt->uncore; struct resource *r; if (dsm->start == 0 || dsm->end <= dsm->start) @@ -79,14 +80,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, */ /* Make sure we don't clobber the GTT if it's within stolen memory */ - if (INTEL_GEN(dev_priv) <= 4 && - !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) { + if (INTEL_GEN(i915) <= 4 && + !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { struct resource stolen[2] = {*dsm, *dsm}; struct resource ggtt_res; resource_size_t ggtt_start; - ggtt_start = I915_READ(PGTBL_CTL); - if (IS_GEN(dev_priv, 4)) + ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); + if (IS_GEN(i915, 4)) ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; else @@ -120,7 +121,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, * kernel. So if the region is already marked as busy, something * is seriously wrong. 
*/ - r = devm_request_mem_region(dev_priv->drm.dev, dsm->start, + r = devm_request_mem_region(i915->drm.dev, dsm->start, resource_size(dsm), "Graphics Stolen Memory"); if (r == NULL) { @@ -133,14 +134,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, * reservation starting from 1 instead of 0. * There's also BIOS with off-by-one on the other end. */ - r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1, + r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, resource_size(dsm) - 2, "Graphics Stolen Memory"); /* * GEN3 firmware likes to smash pci bridges into the stolen * range. Apparently this works. */ - if (r == NULL && !IS_GEN(dev_priv, 3)) { + if (!r && !IS_GEN(i915, 3)) { DRM_ERROR("conflict detected with stolen region: %pR\n", dsm); @@ -151,25 +152,27 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, return 0; } -static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv) +static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) { - if (!drm_mm_initialized(&dev_priv->mm.stolen)) + if (!drm_mm_initialized(&i915->mm.stolen)) return; - drm_mm_takedown(&dev_priv->mm.stolen); + drm_mm_takedown(&i915->mm.stolen); } -static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void g4x_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u32 reg_val = I915_READ(IS_GM45(dev_priv) ? - CTG_STOLEN_RESERVED : - ELK_STOLEN_RESERVED); - resource_size_t stolen_top = dev_priv->dsm.end + 1; + u32 reg_val = intel_uncore_read(uncore, + IS_GM45(i915) ? + CTG_STOLEN_RESERVED : + ELK_STOLEN_RESERVED); + resource_size_t stolen_top = i915->dsm.end + 1; DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n", - IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val); + IS_GM45(i915) ? "CTG" : "ELK", reg_val); if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) return; @@ -178,7 +181,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, * Whether ILK really reuses the ELK register for this is unclear. * Let's see if we catch anyone with this supposedly enabled on ILK. */ - WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n", + WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 
0x%08x\n", reg_val); if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) @@ -190,11 +193,12 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, *size = stolen_top - *base; } -static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void gen6_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -222,12 +226,13 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, } } -static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void vlv_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - resource_size_t stolen_top = dev_priv->dsm.end + 1; + u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); + resource_size_t stolen_top = i915->dsm.end + 1; DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -250,11 +255,12 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, *base = stolen_top - *size; } -static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void gen7_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -276,11 +282,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, } } -static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void chv_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -308,12 +315,13 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, } } -static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void bdw_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - resource_size_t stolen_top = dev_priv->dsm.end + 1; + u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); + resource_size_t stolen_top = i915->dsm.end + 1; DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); @@ -328,10 +336,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, } static void icl_get_stolen_reserved(struct drm_i915_private *i915, + struct intel_uncore *uncore, resource_size_t *base, resource_size_t *size) { - u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED); + u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); @@ -356,22 +365,23 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915, } } -static int i915_gem_init_stolen(struct drm_i915_private *dev_priv) +static int i915_gem_init_stolen(struct drm_i915_private *i915) { + struct intel_uncore *uncore = &i915->uncore; resource_size_t reserved_base, stolen_top; 
resource_size_t reserved_total, reserved_size; - mutex_init(&dev_priv->mm.stolen_lock); + mutex_init(&i915->mm.stolen_lock); - if (intel_vgpu_active(dev_priv)) { - dev_notice(dev_priv->drm.dev, + if (intel_vgpu_active(i915)) { + dev_notice(i915->drm.dev, "%s, disabling use of stolen memory\n", "iGVT-g active"); return 0; } - if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) { - dev_notice(dev_priv->drm.dev, + if (intel_vtd_active() && INTEL_GEN(i915) < 8) { + dev_notice(i915->drm.dev, "%s, disabling use of stolen memory\n", "DMAR active"); return 0; @@ -380,58 +390,59 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv) if (resource_size(&intel_graphics_stolen_res) == 0) return 0; - dev_priv->dsm = intel_graphics_stolen_res; + i915->dsm = intel_graphics_stolen_res; - if (i915_adjust_stolen(dev_priv, &dev_priv->dsm)) + if (i915_adjust_stolen(i915, &i915->dsm)) return 0; - GEM_BUG_ON(dev_priv->dsm.start == 0); - GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start); + GEM_BUG_ON(i915->dsm.start == 0); + GEM_BUG_ON(i915->dsm.end <= i915->dsm.start); - stolen_top = dev_priv->dsm.end + 1; + stolen_top = i915->dsm.end + 1; reserved_base = stolen_top; reserved_size = 0; - switch (INTEL_GEN(dev_priv)) { + switch (INTEL_GEN(i915)) { case 2: case 3: break; case 4: - if (!IS_G4X(dev_priv)) + if (!IS_G4X(i915)) break; /* fall through */ case 5: - g4x_get_stolen_reserved(dev_priv, + g4x_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); break; case 6: - gen6_get_stolen_reserved(dev_priv, + gen6_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); break; case 7: - if (IS_VALLEYVIEW(dev_priv)) - vlv_get_stolen_reserved(dev_priv, + if (IS_VALLEYVIEW(i915)) + vlv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else - gen7_get_stolen_reserved(dev_priv, + gen7_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); break; case 8: case 9: case 10: - if (IS_LP(dev_priv)) - chv_get_stolen_reserved(dev_priv, + if (IS_LP(i915)) + chv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else - bdw_get_stolen_reserved(dev_priv, + bdw_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); break; default: - MISSING_CASE(INTEL_GEN(dev_priv)); + MISSING_CASE(INTEL_GEN(i915)); /* fall-through */ case 11: case 12: - icl_get_stolen_reserved(dev_priv, &reserved_base, + icl_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); break; } @@ -448,12 +459,12 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv) reserved_size = 0; } - dev_priv->dsm_reserved = - (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size); + i915->dsm_reserved = + (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); - if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) { + if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n", - &dev_priv->dsm_reserved, &dev_priv->dsm); + &i915->dsm_reserved, &i915->dsm); return 0; } @@ -462,14 +473,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv) reserved_total = stolen_top - reserved_base; DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n", - (u64)resource_size(&dev_priv->dsm) >> 10, - ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10); + (u64)resource_size(&i915->dsm) >> 10, + ((u64)resource_size(&i915->dsm) - reserved_total) >> 10); - dev_priv->stolen_usable_size = - resource_size(&dev_priv->dsm) - reserved_total; + 
i915->stolen_usable_size = + resource_size(&i915->dsm) - reserved_total; /* Basic memrange allocator for stolen space. */ - drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size); + drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size); return 0; } @@ -478,11 +489,11 @@ static struct sg_table * i915_pages_create_for_stolen(struct drm_device *dev, resource_size_t offset, resource_size_t size) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct sg_table *st; struct scatterlist *sg; - GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm))); + GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm))); /* We hide that we have no struct page backing our stolen object * by wrapping the contiguous physical allocation with a fake @@ -502,7 +513,7 @@ i915_pages_create_for_stolen(struct drm_device *dev, sg->offset = 0; sg->length = size; - sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset; + sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset; sg_dma_len(sg) = size; return st; @@ -533,16 +544,15 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, static void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); GEM_BUG_ON(!stolen); - i915_gem_stolen_remove_node(dev_priv, stolen); - kfree(stolen); + i915_gem_object_release_memory_region(obj); - if (obj->mm.region) - i915_gem_object_release_memory_region(obj); + i915_gem_stolen_remove_node(i915, stolen); + kfree(stolen); } static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { @@ -552,10 +562,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { }; static struct drm_i915_gem_object * -__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, - struct drm_mm_node *stolen, - struct intel_memory_region *mem) +__i915_gem_object_create_stolen(struct intel_memory_region *mem, + struct drm_mm_node *stolen) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; int err = -ENOMEM; @@ -564,20 +574,19 @@ __i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, if (!obj) goto err; - drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size); - i915_gem_object_init(obj, &i915_gem_object_stolen_ops); + drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); + i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class); obj->stolen = stolen; obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; - cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; + cache_level = HAS_LLC(mem->i915) ? 
I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); err = i915_gem_object_pin_pages(obj); if (err) goto cleanup; - if (mem) - i915_gem_object_init_memory_region(obj, mem, 0); + i915_gem_object_init_memory_region(obj, mem, 0); return obj; @@ -592,12 +601,12 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem, resource_size_t size, unsigned int flags) { - struct drm_i915_private *dev_priv = mem->i915; + struct drm_i915_private *i915 = mem->i915; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; - if (!drm_mm_initialized(&dev_priv->mm.stolen)) + if (!drm_mm_initialized(&i915->mm.stolen)) return ERR_PTR(-ENODEV); if (size == 0) @@ -607,30 +616,30 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem, if (!stolen) return ERR_PTR(-ENOMEM); - ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); + ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096); if (ret) { obj = ERR_PTR(ret); goto err_free; } - obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem); + obj = __i915_gem_object_create_stolen(mem, stolen); if (IS_ERR(obj)) goto err_remove; return obj; err_remove: - i915_gem_stolen_remove_node(dev_priv, stolen); + i915_gem_stolen_remove_node(i915, stolen); err_free: kfree(stolen); return obj; } struct drm_i915_gem_object * -i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, +i915_gem_object_create_stolen(struct drm_i915_private *i915, resource_size_t size) { - return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN], + return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN], size, I915_BO_ALLOC_CONTIGUOUS); } @@ -664,18 +673,19 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915) } struct drm_i915_gem_object * -i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, +i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, resource_size_t stolen_offset, resource_size_t gtt_offset, resource_size_t size) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN]; + struct i915_ggtt *ggtt = &i915->ggtt; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; struct i915_vma *vma; int ret; - if (!drm_mm_initialized(&dev_priv->mm.stolen)) + if (!drm_mm_initialized(&i915->mm.stolen)) return ERR_PTR(-ENODEV); DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", @@ -693,19 +703,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv stolen->start = stolen_offset; stolen->size = size; - mutex_lock(&dev_priv->mm.stolen_lock); - ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); - mutex_unlock(&dev_priv->mm.stolen_lock); + mutex_lock(&i915->mm.stolen_lock); + ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); + mutex_unlock(&i915->mm.stolen_lock); if (ret) { DRM_DEBUG_DRIVER("failed to allocate stolen space\n"); kfree(stolen); return ERR_PTR(ret); } - obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL); + obj = __i915_gem_object_create_stolen(mem, stolen); if (IS_ERR(obj)) { DRM_DEBUG_DRIVER("failed to allocate stolen object\n"); - i915_gem_stolen_remove_node(dev_priv, stolen); + i915_gem_stolen_remove_node(i915, stolen); kfree(stolen); return obj; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 1fa592d82af5..6c7825a2dc2a 100644 --- 
a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_gem.h" #include "i915_gem_ioctls.h" +#include "i915_gem_mman.h" #include "i915_gem_object.h" /** diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 4f970474013f..e5558af111e2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -129,9 +129,10 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, spin_unlock(&mn->lock); ret = i915_gem_object_unbind(obj, - I915_GEM_OBJECT_UNBIND_ACTIVE); + I915_GEM_OBJECT_UNBIND_ACTIVE | + I915_GEM_OBJECT_UNBIND_BARRIER); if (ret == 0) - ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + ret = __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); if (ret) return ret; @@ -459,31 +460,36 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) if (pvec != NULL) { struct mm_struct *mm = obj->userptr.mm->mm; unsigned int flags = 0; + int locked = 0; if (!i915_gem_object_is_readonly(obj)) flags |= FOLL_WRITE; ret = -EFAULT; if (mmget_not_zero(mm)) { - down_read(&mm->mmap_sem); while (pinned < npages) { + if (!locked) { + down_read(&mm->mmap_sem); + locked = 1; + } ret = get_user_pages_remote (work->task, mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, - pvec + pinned, NULL, NULL); + pvec + pinned, NULL, &locked); if (ret < 0) break; pinned += ret; } - up_read(&mm->mmap_sem); + if (locked) + up_read(&mm->mmap_sem); mmput(mm); } } - mutex_lock(&obj->mm.lock); + mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES); if (obj->userptr.work == &work->work) { struct sg_table *pages = ERR_PTR(ret); @@ -646,8 +652,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, obj->mm.dirty = false; for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) + if (obj->mm.dirty && trylock_page(page)) { + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + * + * However...! + * + * The mmu-notifier can be invalidated for a + * migrate_page, that is already holding the lock + * on the page. Such a try_to_unmap() will result + * in us calling put_pages() and so recursively trying + * to lock the page. We avoid that deadlock with + * a trylock_page() and in exchange we risk missing + * some page dirtying. + */ set_page_dirty(page); + unlock_page(page); + } mark_page_accessed(page); put_page(page); @@ -725,6 +751,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + static struct lock_class_key lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_userptr *args = data; struct drm_i915_gem_object *obj; @@ -752,15 +779,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { - struct i915_address_space *vm; - /* * On almost all of the older hw, we cannot tell the GPU that * a page is readonly.
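 * (Hence the has_read_only check just below: without hardware support for read-only page-table entries, a "read-only" userptr could still be written by the GPU.)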
*/ - vm = rcu_dereference_protected(dev_priv->kernel_context->vm, - true); /* static vm */ - if (!vm || !vm->has_read_only) + if (!dev_priv->gt.vm->has_read_only) return -ENODEV; } @@ -769,7 +792,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops); + i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index 3c5d17b2b670..fa16f2c3f3ac 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -12,10 +12,14 @@ static void huge_free_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { unsigned long nreal = obj->scratch / PAGE_SIZE; - struct scatterlist *sg; + struct sgt_iter sgt_iter; + struct page *page; - for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg)) - __free_page(sg_page(sg)); + for_each_sgt_page(page, sgt_iter, pages) { + __free_page(page); + if (!--nreal) + break; + } sg_free_table(pages); kfree(pages); @@ -70,7 +74,6 @@ static int huge_get_pages(struct drm_i915_gem_object *obj) err: huge_free_pages(obj, pages); - return -ENOMEM; #undef GFP } @@ -96,6 +99,7 @@ huge_gem_object(struct drm_i915_private *i915, phys_addr_t phys_size, dma_addr_t dma_size) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; @@ -111,7 +115,7 @@ huge_gem_object(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); - i915_gem_object_init(obj, &huge_ops); + i915_gem_object_init(obj, &huge_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index f27772f6779a..2479395c1873 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -9,6 +9,7 @@ #include "i915_selftest.h" #include "gem/i915_gem_region.h" +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" @@ -149,6 +150,7 @@ huge_pages_object(struct drm_i915_private *i915, u64 size, unsigned int page_mask) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); @@ -165,7 +167,7 @@ huge_pages_object(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &huge_page_ops); + i915_gem_object_init(obj, &huge_page_ops, &lock_class); i915_gem_object_set_volatile(obj); @@ -295,6 +297,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = { static struct drm_i915_gem_object * fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); @@ -313,9 +316,9 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) drm_gem_private_object_init(&i915->drm, &obj->base, size); if (single) - i915_gem_object_init(obj, &fake_ops_single); + i915_gem_object_init(obj, &fake_ops_single, &lock_class); else - i915_gem_object_init(obj, &fake_ops); + i915_gem_object_init(obj, 
&fake_ops, &lock_class); i915_gem_object_set_volatile(obj); @@ -514,7 +517,7 @@ static int igt_mock_memory_region_huge_pages(void *arg) i915_vma_unpin(vma); i915_vma_close(vma); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } } @@ -647,7 +650,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) i915_vma_close(vma); i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } @@ -675,7 +678,7 @@ static void close_object_list(struct list_head *objects, list_del(&obj->st_link); i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } } @@ -945,7 +948,7 @@ static int igt_mock_ppgtt_64K(void *arg) i915_vma_close(vma); i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } } @@ -981,7 +984,8 @@ static int gpu_write(struct intel_context *ce, vma->size >> PAGE_SHIFT, val); } -static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +static int +__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) { unsigned int needs_flush; unsigned long n; @@ -1013,6 +1017,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) return err; } +static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + unsigned long n; + int err; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_wc_domain(obj, false); + i915_gem_object_unlock(obj); + if (err) + return err; + + err = i915_gem_object_pin_pages(obj); + if (err) + return err; + + for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { + u32 __iomem *base; + u32 read_val; + + base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + + read_val = ioread32(base + dword); + io_mapping_unmap_atomic(base); + if (read_val != val) { + pr_err("n=%lu base[%u]=%u, val=%u\n", + n, dword, read_val, val); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_pages(obj); + return err; +} + +static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + if (i915_gem_object_has_struct_page(obj)) + return __cpu_check_shmem(obj, dword, val); + else if (i915_gem_object_is_lmem(obj)) + return __cpu_check_lmem(obj, dword, val); + + return -ENODEV; +} + static int __igt_write_huge(struct intel_context *ce, struct drm_i915_gem_object *obj, u64 size, u64 offset, @@ -1061,8 +1110,7 @@ static int __igt_write_huge(struct intel_context *ce, out_vma_unpin: i915_vma_unpin(vma); out_vma_close: - i915_vma_destroy(vma); - + __i915_vma_put(vma); return err; } @@ -1252,7 +1300,7 @@ static int igt_ppgtt_exhaust_huge(void *arg) } i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); i915_gem_object_put(obj); } } @@ -1268,131 +1316,235 @@ out_device: return err; } -static int igt_ppgtt_internal_huge(void *arg) +typedef struct drm_i915_gem_object * +(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags); + +static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) +{ + return i915->mm.gemfs && has_transparent_hugepage(); +} + +static struct drm_i915_gem_object * +igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags) +{ + if (!igt_can_allocate_thp(i915)) { + pr_info("%s missing THP support, skipping\n", __func__); + return ERR_PTR(-ENODEV); + } 
+ + return i915_gem_object_create_shmem(i915, size); +} + +static struct drm_i915_gem_object * +igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags) +{ + return i915_gem_object_create_internal(i915, size); +} + +static struct drm_i915_gem_object * +igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags) +{ + return huge_pages_object(i915, size, size); +} + +static struct drm_i915_gem_object * +igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags) +{ + return i915_gem_object_create_lmem(i915, size, flags); +} + +static u32 igt_random_size(struct rnd_state *prng, + u32 min_page_size, + u32 max_page_size) +{ + u64 mask; + u32 size; + + GEM_BUG_ON(!is_power_of_2(min_page_size)); + GEM_BUG_ON(!is_power_of_2(max_page_size)); + GEM_BUG_ON(min_page_size < PAGE_SIZE); + GEM_BUG_ON(min_page_size > max_page_size); + + mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK; + size = prandom_u32_state(prng) & mask; + if (size < min_page_size) + size |= min_page_size; + + return size; +} + +static int igt_ppgtt_smoke_huge(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; struct drm_i915_gem_object *obj; - static const unsigned int sizes[] = { - SZ_64K, - SZ_128K, - SZ_256K, - SZ_512K, - SZ_1M, - SZ_2M, + I915_RND_STATE(prng); + struct { + igt_create_fn fn; + u32 min; + u32 max; + } backends[] = { + { igt_create_internal, SZ_64K, SZ_2M, }, + { igt_create_shmem, SZ_64K, SZ_32M, }, + { igt_create_local, SZ_64K, SZ_1G, }, }; - int i; int err; + int i; /* - * Sanity check that the HW uses huge pages correctly through internal - * -- ensure that our writes land in the right place. + * Sanity check that the HW uses huge pages correctly through our + * various backends -- ensure that our writes land in the right place. 
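+ * (Each backend iteration draws a random size via igt_random_size() and, on -E2BIG or -ENXIO, halves the size and retries, so smaller systems still exercise the path.)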
*/ - for (i = 0; i < ARRAY_SIZE(sizes); ++i) { - unsigned int size = sizes[i]; + for (i = 0; i < ARRAY_SIZE(backends); ++i) { + u32 min = backends[i].min; + u32 max = backends[i].max; + u32 size = max; +try_again: + size = igt_random_size(&prng, min, rounddown_pow_of_two(size)); - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + obj = backends[i].fn(i915, size, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + if (err == -E2BIG) { + size >>= 1; + goto try_again; + } else if (err == -ENODEV) { + err = 0; + continue; + } + + return err; + } err = i915_gem_object_pin_pages(obj); - if (err) + if (err) { + if (err == -ENXIO || err == -E2BIG) { + i915_gem_object_put(obj); + size >>= 1; + goto try_again; + } goto out_put; + } - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) { - pr_info("internal unable to allocate huge-page(s) with size=%u\n", - size); + if (obj->mm.page_sizes.phys < min) { + pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n", + __func__, size, i); + err = -ENOMEM; goto out_unpin; } err = igt_write_huge(ctx, obj); if (err) { - pr_err("internal write-huge failed with size=%u\n", - size); - goto out_unpin; + pr_err("%s write-huge failed with size=%u, i=%d\n", + __func__, size, i); } - +out_unpin: i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); +out_put: i915_gem_object_put(obj); - } - return 0; + if (err == -ENOMEM || err == -ENXIO) + err = 0; -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); + if (err) + break; - return err; -} + cond_resched(); + } -static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) -{ - return i915->mm.gemfs && has_transparent_hugepage(); + return err; } -static int igt_ppgtt_gemfs_huge(void *arg) +static int igt_ppgtt_sanity_check(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; - static const unsigned int sizes[] = { - SZ_2M, - SZ_4M, - SZ_8M, - SZ_16M, - SZ_32M, + unsigned int supported = INTEL_INFO(i915)->page_sizes; + struct { + igt_create_fn fn; + unsigned int flags; + } backends[] = { + { igt_create_system, 0, }, + { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, }, }; - int i; + struct { + u32 size; + u32 pages; + } combos[] = { + { SZ_64K, SZ_64K }, + { SZ_2M, SZ_2M }, + { SZ_2M, SZ_64K }, + { SZ_2M - SZ_64K, SZ_64K }, + { SZ_2M - SZ_4K, SZ_64K | SZ_4K }, + { SZ_2M + SZ_4K, SZ_64K | SZ_4K }, + { SZ_2M + SZ_4K, SZ_2M | SZ_4K }, + { SZ_2M + SZ_64K, SZ_2M | SZ_64K }, + }; + int i, j; int err; + if (supported == I915_GTT_PAGE_SIZE_4K) + return 0; + /* - * Sanity check that the HW uses huge pages correctly through gemfs -- - * ensure that our writes land in the right place. + * Sanity check that the HW behaves with a limited set of combinations. + * We already have a bunch of randomised testing, which should give us + * a decent amount of variation between runs, however we should keep + * this to limit the chances of introducing a temporary regression, by + * testing the most obvious cases that might make something blow up. 
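+	 *
+	 * Each combo pairs an object size with the sg page sizes to force
+	 * (masked by what the HW actually supports), covering the 64K and
+	 * 2M alignments plus the awkward off-by-a-page cases around them.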
*/ - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - return 0; - } + for (i = 0; i < ARRAY_SIZE(backends); ++i) { + for (j = 0; j < ARRAY_SIZE(combos); ++j) { + struct drm_i915_gem_object *obj; + u32 size = combos[j].size; + u32 pages = combos[j].pages; + + obj = backends[i].fn(i915, size, backends[i].flags); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + if (err == -ENODEV) { + pr_info("Device lacks local memory, skipping\n"); + err = 0; + break; + } - for (i = 0; i < ARRAY_SIZE(sizes); ++i) { - unsigned int size = sizes[i]; + return err; + } - obj = i915_gem_object_create_shmem(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + err = i915_gem_object_pin_pages(obj); + if (err) { + i915_gem_object_put(obj); + goto out; + } - err = i915_gem_object_pin_pages(obj); - if (err) - goto out_put; + GEM_BUG_ON(pages > obj->base.size); + pages = pages & supported; - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { - pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n", - size); - goto out_unpin; - } + if (pages) + obj->mm.page_sizes.sg = pages; - err = igt_write_huge(ctx, obj); - if (err) { - pr_err("gemfs write-huge failed with size=%u\n", - size); - goto out_unpin; + err = igt_write_huge(ctx, obj); + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj); + i915_gem_object_put(obj); + + if (err) { + pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n", + __func__, size, pages, i, j); + goto out; + } } - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); + cond_resched(); } - return 0; - -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); +out: + if (err == -ENOMEM) + err = 0; return err; } @@ -1756,12 +1908,12 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_pin_update), SUBTEST(igt_tmpfs_fallback), SUBTEST(igt_ppgtt_exhaust_huge), - SUBTEST(igt_ppgtt_gemfs_huge), - SUBTEST(igt_ppgtt_internal_huge), + SUBTEST(igt_ppgtt_smoke_huge), + SUBTEST(igt_ppgtt_sanity_check), }; - struct drm_file *file; struct i915_gem_context *ctx; struct i915_address_space *vm; + struct file *file; int err; if (!HAS_PPGTT(i915)) { @@ -1791,6 +1943,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) err = i915_subtests(tests, ctx); out_file: - mock_file_free(i915, file); + fput(file); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index d8804a847945..b972be165e85 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -5,6 +5,7 @@ #include "i915_selftest.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "selftests/igt_flush_test.h" @@ -12,10 +13,9 @@ #include "huge_gem_object.h" #include "mock_context.h" -static int igt_client_fill(void *arg) +static int __igt_client_fill(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = arg; - struct intel_context *ce = i915->engine[BCS0]->kernel_context; + struct intel_context *ce = engine->kernel_context; struct drm_i915_gem_object *obj; struct rnd_state prng; IGT_TIMEOUT(end); @@ -24,6 +24,7 @@ static int igt_client_fill(void *arg) prandom_seed_state(&prng, i915_selftest.random_seed); + intel_engine_pm_get(engine); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; u32 sz = min_t(u64, ce->vm->total >> 4, 
prandom_u32_state(&prng)); @@ -37,7 +38,7 @@ static int igt_client_fill(void *arg) pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__, phys_sz, sz, val); - obj = huge_gem_object(i915, phys_sz, sz); + obj = huge_gem_object(engine->i915, phys_sz, sz); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto err_flush; @@ -99,10 +100,33 @@ err_put: err_flush: if (err == -ENOMEM) err = 0; + intel_engine_pm_put(engine); return err; } +static int igt_client_fill(void *arg) +{ + int inst = 0; + + do { + struct intel_engine_cs *engine; + int err; + + engine = intel_engine_lookup_user(arg, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + err = __igt_client_fill(engine); + if (err == -ENOMEM) + err = 0; + if (err) + return err; + } while (1); +} + int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 549810f70aeb..49edc51111d5 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -6,15 +6,20 @@ #include <linux/prime_numbers.h> +#include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_ring.h" #include "i915_selftest.h" #include "selftests/i915_random.h" -static int cpu_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +struct context { + struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; +}; + +static int cpu_set(struct context *ctx, unsigned long offset, u32 v) { unsigned int needs_clflush; struct page *page; @@ -22,11 +27,11 @@ static int cpu_set(struct drm_i915_gem_object *obj, u32 *cpu; int err; - err = i915_gem_object_prepare_write(obj, &needs_clflush); + err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush); if (err) return err; - page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); cpu = map + offset_in_page(offset); @@ -39,14 +44,12 @@ static int cpu_set(struct drm_i915_gem_object *obj, drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap_atomic(map); - i915_gem_object_finish_access(obj); + i915_gem_object_finish_access(ctx->obj); return 0; } -static int cpu_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) +static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) { unsigned int needs_clflush; struct page *page; @@ -54,11 +57,11 @@ static int cpu_get(struct drm_i915_gem_object *obj, u32 *cpu; int err; - err = i915_gem_object_prepare_read(obj, &needs_clflush); + err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush); if (err) return err; - page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); cpu = map + offset_in_page(offset); @@ -68,26 +71,24 @@ static int cpu_get(struct drm_i915_gem_object *obj, *v = *cpu; kunmap_atomic(map); - i915_gem_object_finish_access(obj); + i915_gem_object_finish_access(ctx->obj); return 0; } -static int gtt_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +static int gtt_set(struct context *ctx, unsigned long offset, u32 v) { struct i915_vma *vma; u32 __iomem *map; int err = 0; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = 
i915_gem_object_set_to_gtt_domain(ctx->obj, true); + i915_gem_object_unlock(ctx->obj); if (err) return err; - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -108,21 +109,19 @@ out_rpm: return err; } -static int gtt_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) +static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) { struct i915_vma *vma; u32 __iomem *map; int err = 0; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_gtt_domain(ctx->obj, false); + i915_gem_object_unlock(ctx->obj); if (err) return err; - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -143,73 +142,66 @@ out_rpm: return err; } -static int wc_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +static int wc_set(struct context *ctx, unsigned long offset, u32 v) { u32 *map; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, true); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_wc_domain(ctx->obj, true); + i915_gem_object_unlock(ctx->obj); if (err) return err; - map = i915_gem_object_pin_map(obj, I915_MAP_WC); + map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); map[offset / sizeof(*map)] = v; - i915_gem_object_unpin_map(obj); + i915_gem_object_unpin_map(ctx->obj); return 0; } -static int wc_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) +static int wc_get(struct context *ctx, unsigned long offset, u32 *v) { u32 *map; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, false); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_wc_domain(ctx->obj, false); + i915_gem_object_unlock(ctx->obj); if (err) return err; - map = i915_gem_object_pin_map(obj, I915_MAP_WC); + map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); *v = map[offset / sizeof(*map)]; - i915_gem_object_unpin_map(obj); + i915_gem_object_unpin_map(ctx->obj); return 0; } -static int gpu_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +static int gpu_set(struct context *ctx, unsigned long offset, u32 v) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_request *rq; struct i915_vma *vma; u32 *cs; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); + i915_gem_object_unlock(ctx->obj); if (err) return err; - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); + vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0); if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_create(i915->engine[RCS0]->kernel_context); + rq = intel_engine_create_kernel_request(ctx->engine); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -222,12 +214,12 @@ static int gpu_set(struct drm_i915_gem_object *obj, return PTR_ERR(cs); } - if (INTEL_GEN(i915) >= 8) { + if (INTEL_GEN(ctx->engine->i915) >= 8) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; *cs++ = 
lower_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = v; - } else if (INTEL_GEN(i915) >= 4) { + } else if (INTEL_GEN(ctx->engine->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = i915_ggtt_offset(vma) + offset; @@ -252,32 +244,34 @@ static int gpu_set(struct drm_i915_gem_object *obj, return err; } -static bool always_valid(struct drm_i915_private *i915) +static bool always_valid(struct context *ctx) { return true; } -static bool needs_fence_registers(struct drm_i915_private *i915) +static bool needs_fence_registers(struct context *ctx) { - return !intel_gt_is_wedged(&i915->gt); -} + struct intel_gt *gt = ctx->engine->gt; -static bool needs_mi_store_dword(struct drm_i915_private *i915) -{ - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(gt)) return false; - if (!HAS_ENGINE(i915, RCS0)) + return gt->ggtt->num_fences; +} + +static bool needs_mi_store_dword(struct context *ctx) +{ + if (intel_gt_is_wedged(ctx->engine->gt)) return false; - return intel_engine_can_store_dword(i915->engine[RCS0]); + return intel_engine_can_store_dword(ctx->engine); } static const struct igt_coherency_mode { const char *name; - int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v); - int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v); - bool (*valid)(struct drm_i915_private *i915); + int (*set)(struct context *ctx, unsigned long offset, u32 v); + int (*get)(struct context *ctx, unsigned long offset, u32 *v); + bool (*valid)(struct context *ctx); } igt_coherency_mode[] = { { "cpu", cpu_set, cpu_get, always_valid }, { "gtt", gtt_set, gtt_get, needs_fence_registers }, @@ -286,18 +280,37 @@ static const struct igt_coherency_mode { { }, }; +static struct intel_engine_cs * +random_engine(struct drm_i915_private *i915, struct rnd_state *prng) +{ + struct intel_engine_cs *engine; + unsigned int count; + + count = 0; + for_each_uabi_engine(engine, i915) + count++; + + count = i915_prandom_u32_max_state(count, prng); + for_each_uabi_engine(engine, i915) + if (count-- == 0) + return engine; + + return NULL; +} + static int igt_gem_coherency(void *arg) { const unsigned int ncachelines = PAGE_SIZE/64; - I915_RND_STATE(prng); struct drm_i915_private *i915 = arg; const struct igt_coherency_mode *read, *write, *over; - struct drm_i915_gem_object *obj; unsigned long count, n; u32 *offsets, *values; + I915_RND_STATE(prng); + struct context ctx; int err = 0; - /* We repeatedly write, overwrite and read from a sequence of + /* + * We repeatedly write, overwrite and read from a sequence of * cachelines in order to try and detect incoherency (unflushed writes * from either the CPU or GPU). Each setter/getter uses our cache * domain API which should prevent incoherency. 
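	 *
	 * Every (overwrite, write, read) triple from igt_coherency_mode is
	 * exercised on a randomly chosen engine: a stale value is planted
	 * first and then overwritten, so any unflushed write shows up as a
	 * mismatch on read-back.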
@@ -311,31 +324,36 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; + ctx.engine = random_engine(i915, &prng); + GEM_BUG_ON(!ctx.engine); + pr_info("%s: using %s\n", __func__, ctx.engine->name); + intel_engine_pm_get(ctx.engine); + for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; - if (!over->valid(i915)) + if (!over->valid(&ctx)) continue; for (write = igt_coherency_mode; write->name; write++) { if (!write->set) continue; - if (!write->valid(i915)) + if (!write->valid(&ctx)) continue; for (read = igt_coherency_mode; read->name; read++) { if (!read->get) continue; - if (!read->valid(i915)) + if (!read->valid(&ctx)) continue; for_each_prime_number_from(count, 1, ncachelines) { - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); + ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(ctx.obj)) { + err = PTR_ERR(ctx.obj); goto free; } @@ -344,7 +362,7 @@ static int igt_gem_coherency(void *arg) values[n] = prandom_u32_state(&prng); for (n = 0; n < count; n++) { - err = over->set(obj, offsets[n], ~values[n]); + err = over->set(&ctx, offsets[n], ~values[n]); if (err) { pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n", n, count, over->name, err); @@ -353,7 +371,7 @@ static int igt_gem_coherency(void *arg) } for (n = 0; n < count; n++) { - err = write->set(obj, offsets[n], values[n]); + err = write->set(&ctx, offsets[n], values[n]); if (err) { pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n", n, count, write->name, err); @@ -364,7 +382,7 @@ static int igt_gem_coherency(void *arg) for (n = 0; n < count; n++) { u32 found; - err = read->get(obj, offsets[n], &found); + err = read->get(&ctx, offsets[n], &found); if (err) { pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n", n, count, read->name, err); @@ -382,17 +400,18 @@ static int igt_gem_coherency(void *arg) } } - i915_gem_object_put(obj); + i915_gem_object_put(ctx.obj); } } } } free: + intel_engine_pm_put(ctx.engine); kfree(offsets); return err; put_object: - i915_gem_object_put(obj); + i915_gem_object_put(ctx.obj); goto free; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index e5c235051ae5..7fc46861a54d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" @@ -26,15 +27,20 @@ #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) +static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx) +{ + /* single threaded, private ctx */ + return rcu_dereference_protected(ctx->vm, true); +} + static int live_nop_switch(void *arg) { const unsigned int nctx = 1024; struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_gem_context **ctx; - enum intel_engine_id id; struct igt_live_test t; - struct drm_file *file; + struct file *file; unsigned long n; int err = -ENODEV; @@ -67,26 +73,35 @@ static int live_nop_switch(void *arg) } } - for_each_engine(engine, i915, id) { - struct i915_request *rq; + for_each_uabi_engine(engine, i915) { + struct i915_request *rq = NULL; unsigned long end_time, prime; ktime_t times[2] = {}; times[0] = ktime_get_raw(); for (n = 0; n < nctx; n++) { - rq = 
igt_request_alloc(ctx[n], engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); + struct i915_request *this; + + this = igt_request_alloc(ctx[n], engine); + if (IS_ERR(this)) { + err = PTR_ERR(this); goto out_file; } - i915_request_add(rq); + if (rq) { + i915_request_await_dma_fence(this, &rq->fence); + i915_request_put(rq); + } + rq = i915_request_get(this); + i915_request_add(this); } if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Failed to populated %d contexts\n", nctx); intel_gt_set_wedged(&i915->gt); + i915_request_put(rq); err = -EIO; goto out_file; } + i915_request_put(rq); times[1] = ktime_get_raw(); @@ -101,13 +116,21 @@ static int live_nop_switch(void *arg) for_each_prime_number_from(prime, 2, 8192) { times[1] = ktime_get_raw(); + rq = NULL; for (n = 0; n < prime; n++) { - rq = igt_request_alloc(ctx[n % nctx], engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); + struct i915_request *this; + + this = igt_request_alloc(ctx[n % nctx], engine); + if (IS_ERR(this)) { + err = PTR_ERR(this); goto out_file; } + if (rq) { /* Force submission order */ + i915_request_await_dma_fence(this, &rq->fence); + i915_request_put(rq); + } + /* * This space is left intentionally blank. * @@ -122,14 +145,18 @@ static int live_nop_switch(void *arg) * for latency. */ - i915_request_add(rq); + rq = i915_request_get(this); + i915_request_add(this); } + GEM_BUG_ON(!rq); if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Switching between %ld contexts timed out\n", prime); intel_gt_set_wedged(&i915->gt); + i915_request_put(rq); break; } + i915_request_put(rq); times[1] = ktime_sub(ktime_get_raw(), times[1]); if (prime == 2) @@ -150,7 +177,7 @@ static int live_nop_switch(void *arg) } out_file: - mock_file_free(i915, file); + fput(file); return err; } @@ -170,18 +197,24 @@ static int __live_parallel_switch1(void *data) struct i915_request *rq = NULL; int err, n; - for (n = 0; n < ARRAY_SIZE(arg->ce); n++) { - i915_request_put(rq); + err = 0; + for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) { + struct i915_request *prev = rq; rq = i915_request_create(arg->ce[n]); - if (IS_ERR(rq)) + if (IS_ERR(rq)) { + i915_request_put(prev); return PTR_ERR(rq); + } i915_request_get(rq); + if (prev) { + err = i915_request_await_dma_fence(rq, &prev->fence); + i915_request_put(prev); + } + i915_request_add(rq); } - - err = 0; if (i915_request_wait(rq, 0, HZ / 5) < 0) err = -ETIME; i915_request_put(rq); @@ -198,6 +231,7 @@ static int __live_parallel_switch1(void *data) static int __live_parallel_switchN(void *data) { struct parallel_switch *arg = data; + struct i915_request *rq = NULL; IGT_TIMEOUT(end_time); unsigned long count; int n; @@ -205,17 +239,31 @@ static int __live_parallel_switchN(void *data) count = 0; do { for (n = 0; n < ARRAY_SIZE(arg->ce); n++) { - struct i915_request *rq; + struct i915_request *prev = rq; + int err = 0; rq = i915_request_create(arg->ce[n]); - if (IS_ERR(rq)) + if (IS_ERR(rq)) { + i915_request_put(prev); return PTR_ERR(rq); + } + + i915_request_get(rq); + if (prev) { + err = i915_request_await_dma_fence(rq, &prev->fence); + i915_request_put(prev); + } i915_request_add(rq); + if (err) { + i915_request_put(rq); + return err; + } } count++; } while (!__igt_timeout(end_time, NULL)); + i915_request_put(rq); pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count); return 0; @@ -235,7 +283,7 @@ static int live_parallel_switch(void *arg) int (* const *fn)(void *arg); struct i915_gem_context *ctx; struct intel_context *ce; - struct drm_file *file; + struct file *file; int n, m, count; int 
err = 0; @@ -289,7 +337,7 @@ static int live_parallel_switch(void *arg) if (!data[m].ce[0]) continue; - ce = intel_context_create(ctx, data[m].ce[0]->engine); + ce = intel_context_create(data[m].ce[0]->engine); if (IS_ERR(ce)) goto out; @@ -325,6 +373,8 @@ static int live_parallel_switch(void *arg) get_task_struct(data[n].tsk); } + yield(); /* start all threads before we kthread_stop() */ + for (n = 0; n < count; n++) { int status; @@ -355,7 +405,7 @@ out: } kfree(data); out_file: - mock_file_free(i915, file); + fput(file); return err; } @@ -480,17 +530,17 @@ out_unmap: return err; } -static int file_add_object(struct drm_file *file, - struct drm_i915_gem_object *obj) +static int file_add_object(struct file *file, struct drm_i915_gem_object *obj) { int err; GEM_BUG_ON(obj->base.handle_count); /* tie the object to the drm_file for easy reaping */ - err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL); + err = idr_alloc(&to_drm_file(file)->object_idr, + &obj->base, 1, 0, GFP_KERNEL); if (err < 0) - return err; + return err; i915_gem_object_get(obj); obj->base.handle_count++; @@ -499,7 +549,7 @@ static int file_add_object(struct drm_file *file, static struct drm_i915_gem_object * create_test_object(struct i915_address_space *vm, - struct drm_file *file, + struct file *file, struct list_head *objects) { struct drm_i915_gem_object *obj; @@ -583,7 +633,6 @@ static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - enum intel_engine_id id; int err = -ENODEV; /* @@ -595,14 +644,14 @@ static int igt_ctx_exec(void *arg) if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { struct drm_i915_gem_object *obj = NULL; unsigned long ncontexts, ndwords, dw; struct i915_request *tq[5] = {}; struct igt_live_test t; - struct drm_file *file; IGT_TIMEOUT(end_time); LIST_HEAD(objects); + struct file *file; if (!intel_engine_can_store_dword(engine)) continue; @@ -695,7 +744,7 @@ out_file: if (igt_live_test_end(&t)) err = -EIO; - mock_file_free(i915, file); + fput(file); if (err) return err; @@ -711,9 +760,8 @@ static int igt_shared_ctx_exec(void *arg) struct i915_request *tq[5] = {}; struct i915_gem_context *parent; struct intel_engine_cs *engine; - enum intel_engine_id id; struct igt_live_test t; - struct drm_file *file; + struct file *file; int err = 0; /* @@ -743,7 +791,7 @@ static int igt_shared_ctx_exec(void *arg) if (err) goto out_file; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { unsigned long ncontexts, ndwords, dw; struct drm_i915_gem_object *obj = NULL; IGT_TIMEOUT(end_time); @@ -766,14 +814,15 @@ static int igt_shared_ctx_exec(void *arg) } mutex_lock(&ctx->mutex); - __assign_ppgtt(ctx, parent->vm); + __assign_ppgtt(ctx, ctx_vm(parent)); mutex_unlock(&ctx->mutex); ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); GEM_BUG_ON(IS_ERR(ce)); if (!obj) { - obj = create_test_object(parent->vm, file, &objects); + obj = create_test_object(ctx_vm(parent), + file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); intel_context_put(ce); @@ -834,7 +883,7 @@ out_test: if (igt_live_test_end(&t)) err = -EIO; out_file: - mock_file_free(i915, file); + fput(file); return err; } @@ -1120,8 +1169,7 @@ out: igt_spinner_end(spin); if ((flags & TEST_IDLE) && ret == 0) { - ret = intel_gt_wait_for_idle(ce->engine->gt, - MAX_SCHEDULE_TIMEOUT); + ret = igt_flush_test(ce->engine->i915); if (ret) return ret; @@ -1143,9 +1191,11 @@ __sseu_test(const char 
*name, struct igt_spinner *spin = NULL; int ret; + intel_engine_pm_get(ce->engine); + ret = __sseu_prepare(name, flags, ce, &spin); if (ret) - return ret; + goto out_pm; ret = intel_context_reconfigure_sseu(ce, sseu); if (ret) @@ -1160,6 +1210,8 @@ out_spin: igt_spinner_fini(spin); kfree(spin); } +out_pm: + intel_engine_pm_put(ce->engine); return ret; } @@ -1168,93 +1220,89 @@ __igt_ctx_sseu(struct drm_i915_private *i915, const char *name, unsigned int flags) { - struct intel_engine_cs *engine = i915->engine[RCS0]; struct drm_i915_gem_object *obj; - struct i915_gem_context *ctx; - struct intel_context *ce; - struct intel_sseu pg_sseu; - struct drm_file *file; - int ret; - - if (INTEL_GEN(i915) < 9 || !engine) - return 0; - - if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) - return 0; + int inst = 0; + int ret = 0; - if (hweight32(engine->sseu.slice_mask) < 2) + if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg) return 0; - /* - * Gen11 VME friendly power-gated configuration with half enabled - * sub-slices. - */ - pg_sseu = engine->sseu; - pg_sseu.slice_mask = 1; - pg_sseu.subslice_mask = - ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); - - pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", - name, flags, hweight32(engine->sseu.slice_mask), - hweight32(pg_sseu.slice_mask)); - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - if (flags & TEST_RESET) igt_global_reset_lock(&i915->gt); - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - ret = PTR_ERR(ctx); - goto out_unlock; - } - i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */ - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto out_unlock; } - ce = i915_gem_context_get_engine(ctx, RCS0); - if (IS_ERR(ce)) { - ret = PTR_ERR(ce); - goto out_put; - } + do { + struct intel_engine_cs *engine; + struct intel_context *ce; + struct intel_sseu pg_sseu; - ret = intel_context_pin(ce); - if (ret) - goto out_context; + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_RENDER, + inst++); + if (!engine) + break; - /* First set the default mask. */ - ret = __sseu_test(name, flags, ce, obj, engine->sseu); - if (ret) - goto out_fail; + if (hweight32(engine->sseu.slice_mask) < 2) + continue; - /* Then set a power-gated configuration. */ - ret = __sseu_test(name, flags, ce, obj, pg_sseu); - if (ret) - goto out_fail; + /* + * Gen11 VME friendly power-gated configuration with + * half enabled sub-slices. + */ + pg_sseu = engine->sseu; + pg_sseu.slice_mask = 1; + pg_sseu.subslice_mask = + ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); + + pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", + engine->name, name, flags, + hweight32(engine->sseu.slice_mask), + hweight32(pg_sseu.slice_mask)); + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + goto out_put; + } - /* Back to defaults. */ - ret = __sseu_test(name, flags, ce, obj, engine->sseu); - if (ret) - goto out_fail; + ret = intel_context_pin(ce); + if (ret) + goto out_ce; - /* One last power-gated configuration for the road. */ - ret = __sseu_test(name, flags, ce, obj, pg_sseu); - if (ret) - goto out_fail; + /* First set the default mask. */ + ret = __sseu_test(name, flags, ce, obj, engine->sseu); + if (ret) + goto out_unpin; + + /* Then set a power-gated configuration. */ + ret = __sseu_test(name, flags, ce, obj, pg_sseu); + if (ret) + goto out_unpin; + + /* Back to defaults. 
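+	 * Restoring the default mask checks that reconfiguration works in
+	 * both directions, not just away from the boot-time configuration.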
*/ + ret = __sseu_test(name, flags, ce, obj, engine->sseu); + if (ret) + goto out_unpin; + + /* One last power-gated configuration for the road. */ + ret = __sseu_test(name, flags, ce, obj, pg_sseu); + if (ret) + goto out_unpin; + +out_unpin: + intel_context_unpin(ce); +out_ce: + intel_context_put(ce); + } while (!ret); -out_fail: if (igt_flush_test(i915)) ret = -EIO; - intel_context_unpin(ce); -out_context: - intel_context_put(ce); out_put: i915_gem_object_put(obj); @@ -1262,8 +1310,6 @@ out_unlock: if (flags & TEST_RESET) igt_global_reset_unlock(&i915->gt); - mock_file_free(i915, file); - if (ret) pr_err("%s: Failed with %d!\n", name, ret); @@ -1296,16 +1342,18 @@ static int igt_ctx_sseu(void *arg) static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; + unsigned long idx, ndwords, dw, num_engines; struct drm_i915_gem_object *obj = NULL; struct i915_request *tq[5] = {}; + struct i915_gem_engines_iter it; struct i915_address_space *vm; struct i915_gem_context *ctx; - unsigned long idx, ndwords, dw; + struct intel_context *ce; struct igt_live_test t; - struct drm_file *file; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); LIST_HEAD(objects); + struct file *file; int err = -ENODEV; /* @@ -1328,21 +1376,21 @@ static int igt_ctx_readonly(void *arg) goto out_file; } - rcu_read_lock(); - vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm; + vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm; if (!vm || !vm->has_read_only) { - rcu_read_unlock(); err = 0; goto out_file; } - rcu_read_unlock(); + + num_engines = 0; + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) + if (intel_engine_can_store_dword(ce->engine)) + num_engines++; + i915_gem_context_unlock_engines(ctx); ndwords = 0; dw = 0; while (!time_after(jiffies, end_time)) { - struct i915_gem_engines_iter it; - struct intel_context *ce; - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { if (!intel_engine_can_store_dword(ce->engine)) @@ -1365,7 +1413,7 @@ static int igt_ctx_readonly(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), ce->engine->name, - yesno(!!rcu_access_pointer(ctx->vm)), + yesno(!!ctx_vm(ctx)), err); i915_gem_context_unlock_engines(ctx); goto out_file; @@ -1385,8 +1433,8 @@ static int igt_ctx_readonly(void *arg) } i915_gem_context_unlock_engines(ctx); } - pr_info("Submitted %lu dwords (across %u engines)\n", - ndwords, RUNTIME_INFO(i915)->num_engines); + pr_info("Submitted %lu dwords (across %lu engines)\n", + ndwords, num_engines); dw = 0; idx = 0; @@ -1411,7 +1459,7 @@ out_file: if (igt_live_test_end(&t)) err = -EIO; - mock_file_free(i915, file); + fput(file); return err; } @@ -1451,7 +1499,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto err; + goto out; } *cmd++ = MI_STORE_DWORD_IMM_GEN4; @@ -1473,12 +1521,12 @@ static int write_to_scratch(struct i915_gem_context *ctx, vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto err_vm; + goto out_vm; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); if (err) - goto err_vm; + goto out_vm; err = check_scratch(vm, offset); if (err) @@ -1502,22 +1550,20 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto skip_request; - i915_vma_unpin_and_release(&vma, 0); + i915_vma_unpin(vma); i915_request_add(rq); - i915_vm_put(vm); - return 0; - + goto out_vm; skip_request: i915_request_skip(rq, err); err_request: i915_request_add(rq); err_unpin: i915_vma_unpin(vma); -err_vm: +out_vm: i915_vm_put(vm); -err: +out: i915_gem_object_put(obj); return err; } @@ -1545,7 +1591,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto err; + goto out; } memset(cmd, POISON_INUSE, PAGE_SIZE); @@ -1577,12 +1623,12 @@ static int read_from_scratch(struct i915_gem_context *ctx, vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto err_vm; + goto out_vm; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); if (err) - goto err_vm; + goto out_vm; err = check_scratch(vm, offset); if (err) @@ -1615,29 +1661,27 @@ static int read_from_scratch(struct i915_gem_context *ctx, err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (err) - goto err_vm; + goto out_vm; cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto err_vm; + goto out_vm; } *value = cmd[result / sizeof(*cmd)]; i915_gem_object_unpin_map(obj); - i915_gem_object_put(obj); - - return 0; + goto out_vm; skip_request: i915_request_skip(rq, err); err_request: i915_request_add(rq); err_unpin: i915_vma_unpin(vma); -err_vm: +out_vm: i915_vm_put(vm); -err: +out: i915_gem_object_put(obj); return err; } @@ -1646,12 +1690,11 @@ static int igt_vm_isolation(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; + unsigned long num_engines, count; struct intel_engine_cs *engine; struct igt_live_test t; - struct drm_file *file; I915_RND_STATE(prng); - unsigned long count; - unsigned int id; + struct file *file; u64 vm_total; int err; @@ -1684,15 +1727,16 @@ static int igt_vm_isolation(void *arg) } /* We can only test vm isolation, if the vm are distinct */ - if (ctx_a->vm == ctx_b->vm) + if (ctx_vm(ctx_a) == ctx_vm(ctx_b)) goto out_file; - vm_total = ctx_a->vm->total; - GEM_BUG_ON(ctx_b->vm->total != vm_total); + vm_total = ctx_vm(ctx_a)->total; + GEM_BUG_ON(ctx_vm(ctx_b)->total != 
vm_total); vm_total -= I915_GTT_PAGE_SIZE; count = 0; - for_each_engine(engine, i915, id) { + num_engines = 0; + for_each_uabi_engine(engine, i915) { IGT_TIMEOUT(end_time); unsigned long this = 0; @@ -1729,14 +1773,15 @@ static int igt_vm_isolation(void *arg) this++; } count += this; + num_engines++; } - pr_info("Checked %lu scratch offsets across %d engines\n", - count, RUNTIME_INFO(i915)->num_engines); + pr_info("Checked %lu scratch offsets across %lu engines\n", + count, num_engines); out_file: if (igt_live_test_end(&t)) err = -EIO; - mock_file_free(i915, file); + fput(file); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 65d4dbf91999..cbf796da64e3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -6,12 +6,14 @@ #include <linux/prime_numbers.h> +#include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "huge_gem_object.h" #include "i915_selftest.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" +#include "selftests/igt_mmap.h" struct tile { unsigned int width; @@ -161,7 +163,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, kunmap(p); out: - i915_vma_destroy(vma); + __i915_vma_put(vma); return err; } @@ -255,7 +257,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, if (err) return err; - i915_vma_destroy(vma); + __i915_vma_put(vma); if (igt_timeout(end_time, "%s: timed out after tiling=%d stride=%d\n", @@ -301,6 +303,9 @@ static int igt_partial_tiling(void *arg) int tiling; int err; + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return 0; + /* We want to check the page mapping and fencing of a large object * mmapped through the GTT. The object we create is larger than can * possibly be mmaped as a whole, and so we must use partial GGTT vma. @@ -431,6 +436,9 @@ static int igt_smoke_tiling(void *arg) IGT_TIMEOUT(end); int err; + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return 0; + /* * igt_partial_tiling() does an exhastive check of partial tiling * chunking, but will undoubtably run out of time. 
Here, we do a @@ -515,22 +523,21 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_engine_cs *engine; - enum intel_engine_id id; - struct i915_vma *vma; - int err; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); + for_each_uabi_engine(engine, i915) { + struct i915_request *rq; + struct i915_vma *vma; + int err; - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); - for_each_engine(engine, i915, id) { - struct i915_request *rq; + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -544,12 +551,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) i915_vma_unlock(vma); i915_request_add(rq); + i915_vma_unpin(vma); + if (err) + return err; } - i915_vma_unpin(vma); i915_gem_object_put(obj); /* leave it only alive via its active ref */ - - return err; + return 0; } static bool assert_mmap_offset(struct drm_i915_private *i915, @@ -557,16 +565,16 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, int expected) { struct drm_i915_gem_object *obj; - int err; + struct i915_mmap_offset *mmo; obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) return PTR_ERR(obj); - err = create_mmap_offset(obj); + mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); i915_gem_object_put(obj); - return err == expected; + return PTR_ERR_OR_ZERO(mmo) == expected; } static void disable_retire_worker(struct drm_i915_private *i915) @@ -600,28 +608,50 @@ static int igt_mmap_offset_exhaustion(void *arg) struct drm_i915_private *i915 = arg; struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; struct drm_i915_gem_object *obj; - struct drm_mm_node resv, *hole; - u64 hole_start, hole_end; - int loop, err; + struct drm_mm_node *hole, *next; + struct i915_mmap_offset *mmo; + int loop, err = 0; /* Disable background reaper */ disable_retire_worker(i915); GEM_BUG_ON(!i915->gt.awake); + intel_gt_retire_requests(&i915->gt); + i915_gem_drain_freed_objects(i915); /* Trim the device mmap space to only a page */ - memset(&resv, 0, sizeof(resv)); - drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { - resv.start = hole_start; - resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */ - mmap_offset_lock(i915); - err = drm_mm_reserve_node(mm, &resv); - mmap_offset_unlock(i915); + mmap_offset_lock(i915); + loop = 1; /* PAGE_SIZE units */ + list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) { + struct drm_mm_node *resv; + + resv = kzalloc(sizeof(*resv), GFP_NOWAIT); + if (!resv) { + err = -ENOMEM; + goto out_park; + } + + resv->start = drm_mm_hole_node_start(hole) + loop; + resv->size = hole->hole_size - loop; + resv->color = -1ul; + loop = 0; + + if (!resv->size) { + kfree(resv); + continue; + } + + pr_debug("Reserving hole [%llx + %llx]\n", + resv->start, resv->size); + + err = drm_mm_reserve_node(mm, resv); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); + kfree(resv); goto out_park; } - break; } + GEM_BUG_ON(!list_is_singular(&mm->hole_stack)); + mmap_offset_unlock(i915); /* Just fits! 
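	 * Exactly one page of mmap space survives the trim, so creating a
	 * single-page mmap offset must still succeed.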
*/ if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) { @@ -644,9 +674,10 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } - err = create_mmap_offset(obj); - if (err) { + mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL); + if (IS_ERR(mmo)) { pr_err("Unable to insert object into reclaimed hole\n"); + err = PTR_ERR(mmo); goto err_obj; } @@ -678,9 +709,15 @@ static int igt_mmap_offset_exhaustion(void *arg) out: mmap_offset_lock(i915); - drm_mm_remove_node(&resv); - mmap_offset_unlock(i915); out_park: + drm_mm_for_each_node_safe(hole, next, mm) { + if (hole->color != -1ul) + continue; + + drm_mm_remove_node(hole); + kfree(hole); + } + mmap_offset_unlock(i915); restore_retire_worker(i915); return err; err_obj: @@ -688,12 +725,258 @@ err_obj: goto out; } +#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) +static int igt_mmap(void *arg, enum i915_mmap_type type) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct i915_mmap_offset *mmo; + struct vm_area_struct *area; + unsigned long addr; + void *vaddr; + int err = 0, i; + + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return 0; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out; + } + memset(vaddr, POISON_INUSE, PAGE_SIZE); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) { + err = PTR_ERR(mmo); + goto out; + } + + addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + if (IS_ERR_VALUE(addr)) { + err = addr; + goto out; + } + + pr_debug("igt_mmap() @ %lx\n", addr); + + area = find_vma(current->mm, addr); + if (!area) { + pr_err("Did not create a vm_area_struct for the mmap\n"); + err = -EINVAL; + goto out_unmap; + } + + if (area->vm_private_data != mmo) { + pr_err("vm_area_struct did not point back to our mmap_offset object!\n"); + err = -EINVAL; + goto out_unmap; + } + + for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) { + u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); + u32 x; + + if (get_user(x, ux)) { + pr_err("Unable to read from mmap, offset:%zd\n", + i * sizeof(x)); + err = -EFAULT; + break; + } + + if (x != expand32(POISON_INUSE)) { + pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", + i * sizeof(x), x, expand32(POISON_INUSE)); + err = -EINVAL; + break; + } + + x = expand32(POISON_FREE); + if (put_user(x, ux)) { + pr_err("Unable to write to mmap, offset:%zd\n", + i * sizeof(x)); + err = -EFAULT; + break; + } + } + +out_unmap: + vm_munmap(addr, PAGE_SIZE); + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out; + } + if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) { + pr_err("Write via mmap did not land in backing store\n"); + err = -EINVAL; + } + i915_gem_object_unpin_map(obj); + +out: + i915_gem_object_put(obj); + return err; +} + +static int igt_mmap_gtt(void *arg) +{ + return igt_mmap(arg, I915_MMAP_TYPE_GTT); +} + +static int igt_mmap_cpu(void *arg) +{ + return igt_mmap(arg, I915_MMAP_TYPE_WC); +} + +static int check_present_pte(pte_t *pte, unsigned long addr, void *data) +{ + if (!pte_present(*pte) || pte_none(*pte)) { + pr_err("missing PTE:%lx\n", + (addr - (unsigned long)data) >> PAGE_SHIFT); + return -EINVAL; + } + + return 0; +} + +static int check_absent_pte(pte_t *pte, 
unsigned long addr, void *data) +{ + if (pte_present(*pte) && !pte_none(*pte)) { + pr_err("present PTE:%lx; expected to be revoked\n", + (addr - (unsigned long)data) >> PAGE_SHIFT); + return -EINVAL; + } + + return 0; +} + +static int check_present(unsigned long addr, unsigned long len) +{ + return apply_to_page_range(current->mm, addr, len, + check_present_pte, (void *)addr); +} + +static int check_absent(unsigned long addr, unsigned long len) +{ + return apply_to_page_range(current->mm, addr, len, + check_absent_pte, (void *)addr); +} + +static int prefault_range(u64 start, u64 len) +{ + const char __user *addr, *end; + char __maybe_unused c; + int err; + + addr = u64_to_user_ptr(start); + end = addr + len; + + for (; addr < end; addr += PAGE_SIZE) { + err = __get_user(c, addr); + if (err) + return err; + } + + return __get_user(c, end - 1); +} + +static int igt_mmap_revoke(void *arg, enum i915_mmap_type type) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct i915_mmap_offset *mmo; + unsigned long addr; + int err; + + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return 0; + + obj = i915_gem_object_create_internal(i915, SZ_4M); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + mmo = mmap_offset_attach(obj, type, NULL); + if (IS_ERR(mmo)) { + err = PTR_ERR(mmo); + goto out; + } + + addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED); + if (IS_ERR_VALUE(addr)) { + err = addr; + goto out; + } + + err = prefault_range(addr, obj->base.size); + if (err) + goto out_unmap; + + GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT && + !atomic_read(&obj->bind_count)); + + err = check_present(addr, obj->base.size); + if (err) + goto out_unmap; + + /* + * After unbinding the object from the GGTT, its address may be reused + * for other objects. Ergo we have to revoke the previous mmap PTE + * access as it no longer points to the same object. 
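+	 * check_absent() below walks the CPU page tables with
+	 * apply_to_page_range() to confirm that every PTE of the mapping
+	 * really was cleared by the revoke.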
+ */ + err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); + if (err) { + pr_err("Failed to unbind object!\n"); + goto out_unmap; + } + GEM_BUG_ON(atomic_read(&obj->bind_count)); + + if (type != I915_MMAP_TYPE_GTT) { + __i915_gem_object_put_pages(obj); + if (i915_gem_object_has_pages(obj)) { + pr_err("Failed to put-pages object!\n"); + err = -EINVAL; + goto out_unmap; + } + } + + err = check_absent(addr, obj->base.size); + if (err) + goto out_unmap; + +out_unmap: + vm_munmap(addr, obj->base.size); +out: + i915_gem_object_put(obj); + return err; +} + +static int igt_mmap_gtt_revoke(void *arg) +{ + return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT); +} + +static int igt_mmap_cpu_revoke(void *arg) +{ + return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC); +} + int i915_gem_mman_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_partial_tiling), SUBTEST(igt_smoke_tiling), SUBTEST(igt_mmap_offset_exhaustion), + SUBTEST(igt_mmap_gtt), + SUBTEST(igt_mmap_cpu), + SUBTEST(igt_mmap_gtt_revoke), + SUBTEST(igt_mmap_cpu_revoke), }; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index 9ec55b3a3815..62077fe46715 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -3,40 +3,247 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/sort.h> + #include "gt/intel_gt.h" +#include "gt/intel_engine_user.h" #include "i915_selftest.h" +#include "gem/i915_gem_context.h" #include "selftests/igt_flush_test.h" +#include "selftests/i915_random.h" #include "selftests/mock_drm.h" #include "huge_gem_object.h" #include "mock_context.h" -static int igt_fill_blt(void *arg) +static int wrap_ktime_compare(const void *A, const void *B) +{ + const ktime_t *a = A, *b = B; + + return ktime_compare(*a, *b); +} + +static int __perf_fill_blt(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + int inst = 0; + + do { + struct intel_engine_cs *engine; + ktime_t t[5]; + int pass; + int err; + + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + intel_engine_pm_get(engine); + for (pass = 0; pass < ARRAY_SIZE(t); pass++) { + struct intel_context *ce = engine->kernel_context; + ktime_t t0, t1; + + t0 = ktime_get(); + + err = i915_gem_object_fill_blt(obj, ce, 0); + if (err) + break; + + err = i915_gem_object_wait(obj, + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (err) + break; + + t1 = ktime_get(); + t[pass] = ktime_sub(t1, t0); + } + intel_engine_pm_put(engine); + if (err) + return err; + + sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL); + pr_info("%s: blt %zd KiB fill: %lld MiB/s\n", + engine->name, + obj->base.size >> 10, + div64_u64(mul_u32_u32(4 * obj->base.size, + 1000 * 1000 * 1000), + t[1] + 2 * t[2] + t[3]) >> 20); + } while (1); +} + +static int perf_fill_blt(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_context *ce = i915->engine[BCS0]->kernel_context; - struct drm_i915_gem_object *obj; + static const unsigned long sizes[] = { + SZ_4K, + SZ_64K, + SZ_2M, + SZ_64M + }; + int i; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_internal(i915, sizes[i]); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __perf_fill_blt(obj); + i915_gem_object_put(obj); + if (err) + 
return err; + } + + return 0; +} + +static int __perf_copy_blt(struct drm_i915_gem_object *src, + struct drm_i915_gem_object *dst) +{ + struct drm_i915_private *i915 = to_i915(src->base.dev); + int inst = 0; + + do { + struct intel_engine_cs *engine; + ktime_t t[5]; + int pass; + int err = 0; + + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + intel_engine_pm_get(engine); + for (pass = 0; pass < ARRAY_SIZE(t); pass++) { + struct intel_context *ce = engine->kernel_context; + ktime_t t0, t1; + + t0 = ktime_get(); + + err = i915_gem_object_copy_blt(src, dst, ce); + if (err) + break; + + err = i915_gem_object_wait(dst, + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (err) + break; + + t1 = ktime_get(); + t[pass] = ktime_sub(t1, t0); + } + intel_engine_pm_put(engine); + if (err) + return err; + + sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL); + pr_info("%s: blt %zd KiB copy: %lld MiB/s\n", + engine->name, + src->base.size >> 10, + div64_u64(mul_u32_u32(4 * src->base.size, + 1000 * 1000 * 1000), + t[1] + 2 * t[2] + t[3]) >> 20); + } while (1); +} + +static int perf_copy_blt(void *arg) +{ + struct drm_i915_private *i915 = arg; + static const unsigned long sizes[] = { + SZ_4K, + SZ_64K, + SZ_2M, + SZ_64M + }; + int i; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + struct drm_i915_gem_object *src, *dst; + int err; + + src = i915_gem_object_create_internal(i915, sizes[i]); + if (IS_ERR(src)) + return PTR_ERR(src); + + dst = i915_gem_object_create_internal(i915, sizes[i]); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto err_src; + } + + err = __perf_copy_blt(src, dst); + + i915_gem_object_put(dst); +err_src: + i915_gem_object_put(src); + if (err) + return err; + } + + return 0; +} + +struct igt_thread_arg { + struct drm_i915_private *i915; + struct i915_gem_context *ctx; + struct file *file; struct rnd_state prng; + unsigned int n_cpus; +}; + +static int igt_fill_blt_thread(void *arg) +{ + struct igt_thread_arg *thread = arg; + struct drm_i915_private *i915 = thread->i915; + struct rnd_state *prng = &thread->prng; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct intel_context *ce; + unsigned int prio; IGT_TIMEOUT(end); - u32 *vaddr; - int err = 0; + int err; + + ctx = thread->ctx; + if (!ctx) { + ctx = live_context(i915, thread->file); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); - prandom_seed_state(&prng, i915_selftest.random_seed); + prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng); + ctx->sched.priority = I915_USER_PRIORITY(prio); + } - /* - * XXX: needs some threads to scale all these tests, also maybe throw - * in submission from higher priority context to see if we are - * preempted for very large objects... - */ + ce = i915_gem_context_get_engine(ctx, BCS0); + GEM_BUG_ON(IS_ERR(ce)); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; - u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng)); - u32 phys_sz = sz % (max_block_size + 1); - u32 val = prandom_u32_state(&prng); + u32 val = prandom_u32_state(prng); + u64 total = ce->vm->total; + u32 phys_sz; + u32 sz; + u32 *vaddr; u32 i; + /* + * If we have a tiny shared address space, like for the GGTT + * then we can't be too greedy. 
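+	 * Splitting the GGTT between the worker threads stops any one
+	 * thread from starving the others of address space.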
+ */ + if (i915_is_ggtt(ce->vm)) + total = div64_u64(total, thread->n_cpus); + + sz = min_t(u64, total >> 4, prandom_u32_state(prng)); + phys_sz = sz % (max_block_size + 1); + sz = round_up(sz, PAGE_SIZE); phys_sz = round_up(phys_sz, PAGE_SIZE); @@ -98,28 +305,50 @@ err_flush: if (err == -ENOMEM) err = 0; + intel_context_put(ce); return err; } -static int igt_copy_blt(void *arg) +static int igt_copy_blt_thread(void *arg) { - struct drm_i915_private *i915 = arg; - struct intel_context *ce = i915->engine[BCS0]->kernel_context; + struct igt_thread_arg *thread = arg; + struct drm_i915_private *i915 = thread->i915; + struct rnd_state *prng = &thread->prng; struct drm_i915_gem_object *src, *dst; - struct rnd_state prng; + struct i915_gem_context *ctx; + struct intel_context *ce; + unsigned int prio; IGT_TIMEOUT(end); - u32 *vaddr; - int err = 0; + int err; - prandom_seed_state(&prng, i915_selftest.random_seed); + ctx = thread->ctx; + if (!ctx) { + ctx = live_context(i915, thread->file); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng); + ctx->sched.priority = I915_USER_PRIORITY(prio); + } + + ce = i915_gem_context_get_engine(ctx, BCS0); + GEM_BUG_ON(IS_ERR(ce)); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; - u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng)); - u32 phys_sz = sz % (max_block_size + 1); - u32 val = prandom_u32_state(&prng); + u32 val = prandom_u32_state(prng); + u64 total = ce->vm->total; + u32 phys_sz; + u32 sz; + u32 *vaddr; u32 i; + if (i915_is_ggtt(ce->vm)) + total = div64_u64(total, thread->n_cpus); + + sz = min_t(u64, total >> 4, prandom_u32_state(prng)); + phys_sz = sz % (max_block_size + 1); + sz = round_up(sz, PAGE_SIZE); phys_sz = round_up(phys_sz, PAGE_SIZE); @@ -201,14 +430,113 @@ err_flush: if (err == -ENOMEM) err = 0; + intel_context_put(ce); return err; } +static int igt_threaded_blt(struct drm_i915_private *i915, + int (*blt_fn)(void *arg), + unsigned int flags) +#define SINGLE_CTX BIT(0) +{ + struct igt_thread_arg *thread; + struct task_struct **tsk; + unsigned int n_cpus, i; + I915_RND_STATE(prng); + int err = 0; + + n_cpus = num_online_cpus() + 1; + + tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL); + if (!tsk) + return 0; + + thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL); + if (!thread) + goto out_tsk; + + thread[0].file = mock_file(i915); + if (IS_ERR(thread[0].file)) { + err = PTR_ERR(thread[0].file); + goto out_thread; + } + + if (flags & SINGLE_CTX) { + thread[0].ctx = live_context(i915, thread[0].file); + if (IS_ERR(thread[0].ctx)) { + err = PTR_ERR(thread[0].ctx); + goto out_file; + } + } + + for (i = 0; i < n_cpus; ++i) { + thread[i].i915 = i915; + thread[i].file = thread[0].file; + thread[i].ctx = thread[0].ctx; + thread[i].n_cpus = n_cpus; + thread[i].prng = + I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng)); + + tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i); + if (IS_ERR(tsk[i])) { + err = PTR_ERR(tsk[i]); + break; + } + + get_task_struct(tsk[i]); + } + + yield(); /* start all threads before we kthread_stop() */ + + for (i = 0; i < n_cpus; ++i) { + int status; + + if (IS_ERR_OR_NULL(tsk[i])) + continue; + + status = kthread_stop(tsk[i]); + if (status && !err) + err = status; + + put_task_struct(tsk[i]); + } + +out_file: + fput(thread[0].file); +out_thread: + kfree(thread); +out_tsk: + kfree(tsk); + return err; +} + +static int igt_fill_blt(void *arg) +{ + return igt_threaded_blt(arg, igt_fill_blt_thread, 0); +} + +static 
int igt_fill_blt_ctx0(void *arg) +{ + return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX); +} + +static int igt_copy_blt(void *arg) +{ + return igt_threaded_blt(arg, igt_copy_blt_thread, 0); +} + +static int igt_copy_blt_ctx0(void *arg) +{ + return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX); +} + int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_fill_blt), + SUBTEST(igt_fill_blt_ctx0), SUBTEST(igt_copy_blt), + SUBTEST(igt_copy_blt_ctx0), }; if (intel_gt_is_wedged(&i915->gt)) @@ -219,3 +547,16 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) return i915_live_subtests(tests, i915); } + +int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(perf_fill_blt), + SUBTEST(perf_copy_blt), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return i915_live_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 74ddd682c9cd..7d7e13dc2fdf 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -5,6 +5,7 @@ */ #include "mock_context.h" +#include "selftests/mock_drm.h" #include "selftests/mock_gtt.h" struct i915_gem_context * @@ -22,6 +23,8 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + i915_gem_context_set_persistence(ctx); + mutex_init(&ctx->engines_mutex); e = default_engines(ctx); if (IS_ERR(e)) @@ -34,9 +37,7 @@ mock_context(struct drm_i915_private *i915, if (name) { struct i915_ppgtt *ppgtt; - ctx->name = kstrdup(name, GFP_KERNEL); - if (!ctx->name) - goto err_put; + strncpy(ctx->name, name, sizeof(ctx->name)); ppgtt = mock_ppgtt(i915, name); if (!ppgtt) @@ -72,7 +73,7 @@ void mock_init_contexts(struct drm_i915_private *i915) } struct i915_gem_context * -live_context(struct drm_i915_private *i915, struct drm_file *file) +live_context(struct drm_i915_private *i915, struct file *file) { struct i915_gem_context *ctx; int err; @@ -81,7 +82,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file) if (IS_ERR(ctx)) return ctx; - err = gem_context_register(ctx, file->driver_priv); + err = gem_context_register(ctx, to_drm_file(file)->driver_priv); if (err < 0) goto err_ctx; @@ -95,7 +96,16 @@ err_ctx: struct i915_gem_context * kernel_context(struct drm_i915_private *i915) { - return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL); + struct i915_gem_context *ctx; + + ctx = i915_gem_create_context(i915, 0); + if (IS_ERR(ctx)) + return ctx; + + i915_gem_context_clear_bannable(ctx); + i915_gem_context_set_persistence(ctx); + + return ctx; } void kernel_context_close(struct i915_gem_context *ctx) diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h index 0b926653914f..fb83d2f09212 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h @@ -7,6 +7,9 @@ #ifndef __MOCK_CONTEXT_H #define __MOCK_CONTEXT_H +struct file; +struct drm_i915_private; + void mock_init_contexts(struct drm_i915_private *i915); struct i915_gem_context * @@ -16,7 +19,7 @@ mock_context(struct drm_i915_private *i915, void mock_context_close(struct i915_gem_context *ctx); struct i915_gem_context * -live_context(struct drm_i915_private *i915, struct drm_file *file); +live_context(struct 
drm_i915_private *i915, struct file *file); struct i915_gem_context *kernel_context(struct drm_i915_private *i915); void kernel_context_close(struct i915_gem_context *ctx); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h index f0f8bbd82dfc..22818bbb139d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h @@ -14,7 +14,7 @@ struct mock_dmabuf { struct page *pages[]; }; -static struct mock_dmabuf *to_mock(struct dma_buf *buf) +static inline struct mock_dmabuf *to_mock(struct dma_buf *buf) { return buf->priv; } diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c new file mode 100644 index 000000000000..6a5e9ab20b94 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright © 2019 Intel Corporation + */ + +#include <drm/drm_print.h> + +#include "debugfs_engines.h" +#include "debugfs_gt.h" +#include "i915_drv.h" /* for_each_engine! */ +#include "intel_engine.h" + +static int engines_show(struct seq_file *m, void *data) +{ + struct intel_gt *gt = m->private; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct drm_printer p; + + p = drm_seq_file_printer(m); + for_each_engine(engine, gt, id) + intel_engine_dump(engine, &p, "%s\n", engine->name); + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(engines); + +void debugfs_engines_register(struct intel_gt *gt, struct dentry *root) +{ + static const struct debugfs_gt_file files[] = { + { "engines", &engines_fops }, + }; + + debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files)); +} diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h new file mode 100644 index 000000000000..f69257eaa1cc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/debugfs_engines.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef DEBUGFS_ENGINES_H +#define DEBUGFS_ENGINES_H + +struct intel_gt; +struct dentry; + +void debugfs_engines_register(struct intel_gt *gt, struct dentry *root); + +#endif /* DEBUGFS_ENGINES_H */ diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c new file mode 100644 index 000000000000..75255aaacaed --- /dev/null +++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/debugfs.h> + +#include "debugfs_engines.h" +#include "debugfs_gt.h" +#include "debugfs_gt_pm.h" +#include "i915_drv.h" + +void debugfs_gt_register(struct intel_gt *gt) +{ + struct dentry *root; + + if (!gt->i915->drm.primary->debugfs_root) + return; + + root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root); + if (IS_ERR(root)) + return; + + debugfs_engines_register(gt, root); + debugfs_gt_pm_register(gt, root); +} + +void debugfs_gt_register_files(struct intel_gt *gt, + struct dentry *root, + const struct debugfs_gt_file *files, + unsigned long count) +{ + while (count--) { + if (!files->eval || files->eval(gt)) + debugfs_create_file(files->name, + 0444, root, gt, + files->fops); + + files++; + } +} diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h new file mode 100644 index 000000000000..4ea0f06cda8f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 
Intel Corporation + */ + +#ifndef DEBUGFS_GT_H +#define DEBUGFS_GT_H + +#include <linux/file.h> + +struct intel_gt; + +#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \ + static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +void debugfs_gt_register(struct intel_gt *gt); + +struct debugfs_gt_file { + const char *name; + const struct file_operations *fops; + bool (*eval)(const struct intel_gt *gt); +}; + +void debugfs_gt_register_files(struct intel_gt *gt, + struct dentry *root, + const struct debugfs_gt_file *files, + unsigned long count); + +#endif /* DEBUGFS_GT_H */ diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c new file mode 100644 index 000000000000..059c9e5c002e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/seq_file.h> + +#include "debugfs_gt.h" +#include "debugfs_gt_pm.h" +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_llc.h" +#include "intel_rc6.h" +#include "intel_rps.h" +#include "intel_runtime_pm.h" +#include "intel_sideband.h" +#include "intel_uncore.h" + +static int fw_domains_show(struct seq_file *m, void *data) +{ + struct intel_gt *gt = m->private; + struct intel_uncore *uncore = gt->uncore; + struct intel_uncore_forcewake_domain *fw_domain; + unsigned int tmp; + + seq_printf(m, "user.bypass_count = %u\n", + uncore->user_forcewake_count); + + for_each_fw_domain(fw_domain, uncore, tmp) + seq_printf(m, "%s.wake_count = %u\n", + intel_uncore_forcewake_domain_to_str(fw_domain->id), + READ_ONCE(fw_domain->wake_count)); + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains); + +static void print_rc6_res(struct seq_file *m, + const char *title, + const i915_reg_t reg) +{ + struct intel_gt *gt = m->private; + intel_wakeref_t wakeref; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + seq_printf(m, "%s %u (%llu us)\n", title, + intel_uncore_read(gt->uncore, reg), + intel_rc6_residency_us(&gt->rc6, reg)); +} + +static int vlv_drpc(struct seq_file *m) +{ + struct intel_gt *gt = m->private; + struct intel_uncore *uncore = gt->uncore; + u32 rcctl1, pw_status; + + pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS); + rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL); + + seq_printf(m, "RC6 Enabled: %s\n", + yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | + GEN6_RC_CTL_EI_MODE(1)))); + seq_printf(m, "Render Power Well: %s\n", + (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); + seq_printf(m, "Media Power Well: %s\n", + (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); + + print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); + print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); + + return fw_domains_show(m, NULL); +} + +static int gen6_drpc(struct seq_file *m) +{ + struct intel_gt *gt = m->private; + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 gt_core_status, rcctl1, rc6vids = 0; + u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; + + gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS); + + rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL); + if (INTEL_GEN(i915) >= 9) { + gen9_powergate_enable = + intel_uncore_read(uncore, GEN9_PG_ENABLE); + gen9_powergate_status = + intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS); + } + + if (INTEL_GEN(i915) <= 7) + sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, + &rc6vids, NULL); + + seq_printf(m, "RC1e Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); + seq_printf(m, "RC6 Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); + if (INTEL_GEN(i915) >= 9) { + seq_printf(m, "Render Well Gating Enabled: %s\n", + yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); + seq_printf(m, "Media Well Gating Enabled: %s\n", + yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); + } + seq_printf(m, "Deep RC6 Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); + seq_printf(m, "Deepest RC6 Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); + seq_puts(m, "Current RC state: "); + switch (gt_core_status & GEN6_RCn_MASK) { + case GEN6_RC0: + if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) + seq_puts(m, "Core Power Down\n"); + else + seq_puts(m, "on\n"); + break; + case GEN6_RC3: + seq_puts(m, "RC3\n"); + break; + case GEN6_RC6: + seq_puts(m, "RC6\n"); + break; + case GEN6_RC7: + seq_puts(m, "RC7\n"); + break; + default: + seq_puts(m, "Unknown\n"); + break; + } + + seq_printf(m, "Core Power Down: %s\n", + yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); + if (INTEL_GEN(i915) >= 9) { + seq_printf(m, "Render Power Well: %s\n", + (gen9_powergate_status & + GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); + seq_printf(m, "Media Power Well: %s\n", + (gen9_powergate_status & + GEN9_PWRGT_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); + } + + /* Not exactly sure what this is */ + print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", + GEN6_GT_GFX_RC6_LOCKED); + print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); + print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); + print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); + + if (INTEL_GEN(i915) <= 7) { + seq_printf(m, "RC6 voltage: %dmV\n", + GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); + seq_printf(m, "RC6+ voltage: %dmV\n", + GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); + seq_printf(m, "RC6++ voltage: %dmV\n", + GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); + } + + return fw_domains_show(m, NULL); +} + +static int ilk_drpc(struct seq_file *m) +{ + struct intel_gt *gt = m->private; + struct intel_uncore *uncore = gt->uncore; + u32 rgvmodectl, rstdbyctl; + u16 crstandvid; + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL); + crstandvid = intel_uncore_read16(uncore, CRSTANDVID); + + seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); + seq_printf(m, "Boost freq: %d\n", + (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> + MEMMODE_BOOST_FREQ_SHIFT); + seq_printf(m, "HW control enabled: %s\n", + yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); + seq_printf(m, "SW control enabled: %s\n", + yesno(rgvmodectl & MEMMODE_SWMODE_EN)); + seq_printf(m, "Gated voltage change: %s\n", + yesno(rgvmodectl & MEMMODE_RCLK_GATE)); + seq_printf(m, "Starting frequency: P%d\n", + (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); + seq_printf(m, "Max P-state: P%d\n", + (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); + seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); + seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); + seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); + seq_printf(m, "Render standby enabled: %s\n", + yesno(!(rstdbyctl & RCX_SW_EXIT))); + seq_puts(m, "Current RS state: "); + switch (rstdbyctl & RSX_STATUS_MASK) { + case RSX_STATUS_ON: + seq_puts(m, "on\n"); + break; + case RSX_STATUS_RC1: + seq_puts(m, "RC1\n"); + break; + case RSX_STATUS_RC1E: + seq_puts(m, "RC1E\n"); + break; + case RSX_STATUS_RS1: + seq_puts(m, "RS1\n"); + break; + case RSX_STATUS_RS2: + seq_puts(m, "RS2 (RC6)\n"); + break; + case RSX_STATUS_RS3: + seq_puts(m, "RC3 (RC6+)\n"); + break; + default: + seq_puts(m, "unknown\n"); + break; + } + + return 0; +} + +static int drpc_show(struct seq_file *m, void *unused) +{ + struct intel_gt *gt = m->private; + struct drm_i915_private *i915 = gt->i915; + intel_wakeref_t wakeref; + int err = -ENODEV; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) { + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + err = vlv_drpc(m); + else if (INTEL_GEN(i915) >= 6) + err = gen6_drpc(m); + else + err = ilk_drpc(m); + } + + return err; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc); + +static int frequency_show(struct seq_file *m, void *unused) +{ + struct intel_gt *gt = m->private; + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_rps *rps = >->rps; + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(uncore->rpm); + + if (IS_GEN(i915, 5)) { + u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); + + seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); + seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + 
MEMSTAT_VID_SHIFT); + seq_printf(m, "Current P-state: %d\n", + (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + u32 rpmodectl, freq_sts; + + rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); + seq_printf(m, "Video Turbo Mode: %s\n", + yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); + seq_printf(m, "HW control enabled: %s\n", + yesno(rpmodectl & GEN6_RP_ENABLE)); + seq_printf(m, "SW control enabled: %s\n", + yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == + GEN6_RP_MEDIA_SW_MODE)); + + vlv_punit_get(i915); + freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + vlv_punit_put(i915); + + seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); + seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq); + + seq_printf(m, "actual GPU freq: %d MHz\n", + intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); + + seq_printf(m, "current GPU freq: %d MHz\n", + intel_gpu_freq(rps, rps->cur_freq)); + + seq_printf(m, "max GPU freq: %d MHz\n", + intel_gpu_freq(rps, rps->max_freq)); + + seq_printf(m, "min GPU freq: %d MHz\n", + intel_gpu_freq(rps, rps->min_freq)); + + seq_printf(m, "idle GPU freq: %d MHz\n", + intel_gpu_freq(rps, rps->idle_freq)); + + seq_printf(m, "efficient (RPe) frequency: %d MHz\n", + intel_gpu_freq(rps, rps->efficient_freq)); + } else if (INTEL_GEN(i915) >= 6) { + u32 rp_state_limits; + u32 gt_perf_status; + u32 rp_state_cap; + u32 rpmodectl, rpinclimit, rpdeclimit; + u32 rpstat, cagf, reqf; + u32 rpupei, rpcurup, rpprevup; + u32 rpdownei, rpcurdown, rpprevdown; + u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; + int max_freq; + + rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS); + if (IS_GEN9_LP(i915)) { + rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); + gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS); + } else { + rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); + gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS); + } + + /* RPSTAT1 is in the GT power well */ + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ); + if (INTEL_GEN(i915) >= 9) { + reqf >>= 23; + } else { + reqf &= ~GEN6_TURBO_DISABLE; + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + reqf >>= 24; + else + reqf >>= 25; + } + reqf = intel_gpu_freq(rps, reqf); + + rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); + rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); + rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); + + rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1); + rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; + rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; + rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; + rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; + rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; + rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; + cagf = intel_rps_read_actual_frequency(rps); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + if (INTEL_GEN(i915) >= 11) { + pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); + pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); + /* + * The equivalent to the PM ISR & IIR cannot be read + * without affecting the current state of the system + */ + pm_isr = 0; + pm_iir = 0; + } else if (INTEL_GEN(i915) >= 8) { + pm_ier = 
intel_uncore_read(uncore, GEN8_GT_IER(2)); + pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2)); + pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2)); + pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2)); + } else { + pm_ier = intel_uncore_read(uncore, GEN6_PMIER); + pm_imr = intel_uncore_read(uncore, GEN6_PMIMR); + pm_isr = intel_uncore_read(uncore, GEN6_PMISR); + pm_iir = intel_uncore_read(uncore, GEN6_PMIIR); + } + pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); + + seq_printf(m, "Video Turbo Mode: %s\n", + yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); + seq_printf(m, "HW control enabled: %s\n", + yesno(rpmodectl & GEN6_RP_ENABLE)); + seq_printf(m, "SW control enabled: %s\n", + yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == + GEN6_RP_MEDIA_SW_MODE)); + + seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", + pm_ier, pm_imr, pm_mask); + if (INTEL_GEN(i915) <= 10) + seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n", + pm_isr, pm_iir); + seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", + rps->pm_intrmsk_mbz); + seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); + seq_printf(m, "Render p-state ratio: %d\n", + (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8); + seq_printf(m, "Render p-state VID: %d\n", + gt_perf_status & 0xff); + seq_printf(m, "Render p-state limit: %d\n", + rp_state_limits & 0xff); + seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); + seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); + seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); + seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); + seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); + seq_printf(m, "CAGF: %dMHz\n", cagf); + seq_printf(m, "RP CUR UP EI: %d (%dus)\n", + rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei)); + seq_printf(m, "RP CUR UP: %d (%dus)\n", + rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup)); + seq_printf(m, "RP PREV UP: %d (%dus)\n", + rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup)); + seq_printf(m, "Up threshold: %d%%\n", + rps->power.up_threshold); + + seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", + rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei)); + seq_printf(m, "RP CUR DOWN: %d (%dus)\n", + rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown)); + seq_printf(m, "RP PREV DOWN: %d (%dus)\n", + rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown)); + seq_printf(m, "Down threshold: %d%%\n", + rps->power.down_threshold); + + max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 : + rp_state_cap >> 16) & 0xff; + max_freq *= (IS_GEN9_BC(i915) || + INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1); + seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", + intel_gpu_freq(rps, max_freq)); + + max_freq = (rp_state_cap & 0xff00) >> 8; + max_freq *= (IS_GEN9_BC(i915) || + INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1); + seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", + intel_gpu_freq(rps, max_freq)); + + max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 : + rp_state_cap >> 0) & 0xff; + max_freq *= (IS_GEN9_BC(i915) || + INTEL_GEN(i915) >= 10 ? 
GEN9_FREQ_SCALER : 1); + seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", + intel_gpu_freq(rps, max_freq)); + seq_printf(m, "Max overclocked frequency: %dMHz\n", + intel_gpu_freq(rps, rps->max_freq)); + + seq_printf(m, "Current freq: %d MHz\n", + intel_gpu_freq(rps, rps->cur_freq)); + seq_printf(m, "Actual freq: %d MHz\n", cagf); + seq_printf(m, "Idle freq: %d MHz\n", + intel_gpu_freq(rps, rps->idle_freq)); + seq_printf(m, "Min freq: %d MHz\n", + intel_gpu_freq(rps, rps->min_freq)); + seq_printf(m, "Boost freq: %d MHz\n", + intel_gpu_freq(rps, rps->boost_freq)); + seq_printf(m, "Max freq: %d MHz\n", + intel_gpu_freq(rps, rps->max_freq)); + seq_printf(m, + "efficient (RPe) frequency: %d MHz\n", + intel_gpu_freq(rps, rps->efficient_freq)); + } else { + seq_puts(m, "no P-state info available\n"); + } + + seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk); + seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq); + seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); + + intel_runtime_pm_put(uncore->rpm, wakeref); + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency); + +static int llc_show(struct seq_file *m, void *data) +{ + struct intel_gt *gt = m->private; + struct drm_i915_private *i915 = gt->i915; + const bool edram = INTEL_GEN(i915) > 8; + struct intel_rps *rps = &gt->rps; + unsigned int max_gpu_freq, min_gpu_freq; + intel_wakeref_t wakeref; + int gpu_freq, ia_freq; + + seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915))); + seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC", + i915->edram_size_mb); + + min_gpu_freq = rps->min_freq; + max_gpu_freq = rps->max_freq; + if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + /* Convert GT frequency to 50 HZ units */ + min_gpu_freq /= GEN9_FREQ_SCALER; + max_gpu_freq /= GEN9_FREQ_SCALER; + } + + seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { + ia_freq = gpu_freq; + sandybridge_pcode_read(i915, + GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq, NULL); + seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", + intel_gpu_freq(rps, + (gpu_freq * + (IS_GEN9_BC(i915) || + INTEL_GEN(i915) >= 10 ? + GEN9_FREQ_SCALER : 1))), + ((ia_freq >> 0) & 0xff) * 100, + ((ia_freq >> 8) & 0xff) * 100); + } + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + + return 0; +} + +static bool llc_eval(const struct intel_gt *gt) +{ + return HAS_LLC(gt->i915); +} + +DEFINE_GT_DEBUGFS_ATTRIBUTE(llc); + +static const char *rps_power_to_str(unsigned int power) +{ + static const char * const strings[] = { + [LOW_POWER] = "low power", + [BETWEEN] = "mixed", + [HIGH_POWER] = "high power", + }; + + if (power >= ARRAY_SIZE(strings) || !strings[power]) + return "unknown"; + + return strings[power]; +} + +static int rps_boost_show(struct seq_file *m, void *data) +{ + struct intel_gt *gt = m->private; + struct drm_i915_private *i915 = gt->i915; + struct intel_rps *rps = &gt->rps; + + seq_printf(m, "RPS enabled? %d\n", rps->enabled); + seq_printf(m, "GPU busy? %s\n", yesno(gt->awake)); + seq_printf(m, "Boosts outstanding? %d\n", + atomic_read(&rps->num_waiters)); + seq_printf(m, "Interactive? 
%d\n", READ_ONCE(rps->power.interactive)); + seq_printf(m, "Frequency requested %d, actual %d\n", + intel_gpu_freq(rps, rps->cur_freq), + intel_rps_read_actual_frequency(rps)); + seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", + intel_gpu_freq(rps, rps->min_freq), + intel_gpu_freq(rps, rps->min_freq_softlimit), + intel_gpu_freq(rps, rps->max_freq_softlimit), + intel_gpu_freq(rps, rps->max_freq)); + seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", + intel_gpu_freq(rps, rps->idle_freq), + intel_gpu_freq(rps, rps->efficient_freq), + intel_gpu_freq(rps, rps->boost_freq)); + + seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); + + if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) { + struct intel_uncore *uncore = gt->uncore; + u32 rpup, rpupei; + u32 rpdown, rpdownei; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; + rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; + rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; + rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", + rps_power_to_str(rps->power.mode)); + seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", + rpup && rpupei ? 100 * rpup / rpupei : 0, + rps->power.up_threshold); + seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", + rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, + rps->power.down_threshold); + } else { + seq_puts(m, "\nRPS Autotuning inactive\n"); + } + + return 0; +} + +static bool rps_eval(const struct intel_gt *gt) +{ + return HAS_RPS(gt->i915); +} + +DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost); + +void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root) +{ + static const struct debugfs_gt_file files[] = { + { "drpc", &drpc_fops, NULL }, + { "frequency", &frequency_fops, NULL }, + { "forcewake", &fw_domains_fops, NULL }, + { "llc", &llc_fops, llc_eval }, + { "rps_boost", &rps_boost_fops, rps_eval }, + }; + + debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files)); +} diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h new file mode 100644 index 000000000000..4cf5f5c9da7d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef DEBUGFS_GT_PM_H +#define DEBUGFS_GT_PM_H + +struct intel_gt; +struct dentry; + +void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root); + +#endif /* DEBUGFS_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 55317081d48b..0ba524a414c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -28,6 +28,8 @@ #include "i915_drv.h" #include "i915_trace.h" +#include "intel_gt_pm.h" +#include "intel_gt_requests.h" static void irq_enable(struct intel_engine_cs *engine) { @@ -53,15 +55,17 @@ static void irq_disable(struct intel_engine_cs *engine) static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) { + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + lockdep_assert_held(&b->irq_lock); GEM_BUG_ON(!b->irq_enabled); if (!--b->irq_enabled) - irq_disable(container_of(b, - struct intel_engine_cs, - breadcrumbs)); + 
irq_disable(engine); b->irq_armed = false; + intel_gt_pm_put_async(engine->gt); } void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) @@ -127,16 +131,23 @@ __dma_fence_signal__notify(struct dma_fence *fence, } } -void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) +static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_engine_cs *engine = + container_of(b, struct intel_engine_cs, breadcrumbs); + + intel_engine_add_retire(engine, tl); +} + +static void signal_irq_work(struct irq_work *work) +{ + struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work); const ktime_t timestamp = ktime_get(); struct intel_context *ce, *cn; struct list_head *pos, *next; - unsigned long flags; LIST_HEAD(signal); - spin_lock_irqsave(&b->irq_lock, flags); + spin_lock(&b->irq_lock); if (b->irq_armed && list_empty(&b->signalers)) __intel_breadcrumbs_disarm_irq(b); @@ -177,44 +188,41 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) if (!list_is_first(pos, &ce->signals)) { /* Advance the list to the first incomplete request */ __list_del_many(&ce->signals, pos); - if (&ce->signals == pos) /* now empty */ + if (&ce->signals == pos) { /* now empty */ list_del_init(&ce->signal_link); + add_retire(b, ce->timeline); + } } } - spin_unlock_irqrestore(&b->irq_lock, flags); + spin_unlock(&b->irq_lock); list_for_each_safe(pos, next, &signal) { struct i915_request *rq = list_entry(pos, typeof(*rq), signal_link); struct list_head cb_list; - spin_lock_irqsave(&rq->lock, flags); + spin_lock(&rq->lock); list_replace(&rq->fence.cb_list, &cb_list); __dma_fence_signal__timestamp(&rq->fence, timestamp); __dma_fence_signal__notify(&rq->fence, &cb_list); - spin_unlock_irqrestore(&rq->lock, flags); + spin_unlock(&rq->lock); i915_request_put(rq); } } -static void signal_irq_work(struct irq_work *work) -{ - struct intel_engine_cs *engine = - container_of(work, typeof(*engine), breadcrumbs.irq_work); - - intel_engine_breadcrumbs_irq(engine); -} - -static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) +static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { struct intel_engine_cs *engine = container_of(b, struct intel_engine_cs, breadcrumbs); lockdep_assert_held(&b->irq_lock); if (b->irq_armed) - return; + return true; + + if (!intel_gt_pm_get_if_awake(engine->gt)) + return false; /* * The breadcrumb irq will be disarmed on the interrupt after the @@ -234,6 +242,8 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) if (!b->irq_enabled++) irq_enable(engine); + + return true; } void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) @@ -271,19 +281,20 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; - struct intel_context *ce = rq->hw_context; + struct intel_context *ce = rq->context; struct list_head *pos; spin_lock(&b->irq_lock); GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - __intel_breadcrumbs_arm_irq(b); + if (!__intel_breadcrumbs_arm_irq(b)) + goto unlock; /* * We keep the seqno in retirement order, so we can break - * inside intel_engine_breadcrumbs_irq as soon as we've passed - * the last completed request (or seen a request that hasn't - * event started). 
We could iterate the timeline->requests list, + * inside intel_engine_signal_breadcrumbs as soon as we've + * passed the last completed request (or seen a request that + * hasn't event started). We could walk the timeline->requests, * but keeping a separate signalers_list has the advantage of * hopefully being much smaller than the full list and so * provides faster iteration and detection when there are no @@ -306,6 +317,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) GEM_BUG_ON(!check_signal_order(ce, rq)); set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); +unlock: spin_unlock(&b->irq_lock); } @@ -326,7 +338,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq) */ spin_lock(&b->irq_lock); if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { - struct intel_context *ce = rq->hw_context; + struct intel_context *ce = rq->context; list_del(&rq->signal_link); if (list_empty(&ce->signals)) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 59c3083c1ec1..fbaa9df6f436 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -13,6 +13,7 @@ #include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_ring.h" static struct i915_global_context { struct i915_global base; @@ -30,8 +31,7 @@ void intel_context_free(struct intel_context *ce) } struct intel_context * -intel_context_create(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +intel_context_create(struct intel_engine_cs *engine) { struct intel_context *ce; @@ -39,7 +39,7 @@ intel_context_create(struct i915_gem_context *ctx, if (!ce) return ERR_PTR(-ENOMEM); - intel_context_init(ce, ctx, engine); + intel_context_init(ce, engine); return ce; } @@ -67,11 +67,8 @@ int __intel_context_do_pin(struct intel_context *ce) if (err) goto err; - GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n", - ce->engine->name, ce->timeline->fence_context, - ce->ring->head, ce->ring->tail); - - i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ + CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n", + ce->ring->head, ce->ring->tail); smp_mb__before_atomic(); /* flush pin before it is visible */ } @@ -97,12 +94,10 @@ void intel_context_unpin(struct intel_context *ce) mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); if (likely(atomic_dec_and_test(&ce->pin_count))) { - GEM_TRACE("%s context:%llx retire\n", - ce->engine->name, ce->timeline->fence_context); + CE_TRACE(ce, "retire\n"); ce->ops->unpin(ce); - i915_gem_context_put(ce->gem_context); intel_context_active_release(ce); } @@ -112,13 +107,10 @@ void intel_context_unpin(struct intel_context *ce) static int __context_pin_state(struct i915_vma *vma) { - u64 flags; + unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; int err; - flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; - flags |= PIN_HIGH | PIN_GLOBAL; - - err = i915_vma_pin(vma, 0, 0, flags); + err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH); if (err) return err; @@ -143,9 +135,9 @@ static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); - GEM_TRACE("%s context:%llx retire\n", - ce->engine->name, ce->timeline->fence_context); + CE_TRACE(ce, "retire\n"); + set_bit(CONTEXT_VALID_BIT, &ce->flags); if (ce->state) __context_unpin_state(ce->state); @@ -197,7 +189,7 @@ int intel_context_active_acquire(struct intel_context *ce) return err; /* Preallocate tracking nodes */ - if 
(!i915_gem_context_is_kernel(ce->gem_context)) { + if (!intel_context_is_barrier(ce)) { err = i915_active_acquire_preallocate_barrier(&ce->active, ce->engine); if (err) { @@ -218,30 +210,19 @@ void intel_context_active_release(struct intel_context *ce) void intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - struct i915_address_space *vm; - GEM_BUG_ON(!engine->cops); + GEM_BUG_ON(!engine->gt->vm); kref_init(&ce->ref); - ce->gem_context = ctx; - rcu_read_lock(); - vm = rcu_dereference(ctx->vm); - if (vm) - ce->vm = i915_vm_get(vm); - else - ce->vm = i915_vm_get(&engine->gt->ggtt->vm); - rcu_read_unlock(); - if (ctx->timeline) - ce->timeline = intel_timeline_get(ctx->timeline); - ce->engine = engine; ce->ops = engine->cops; ce->sseu = engine->sseu; - ce->ring = __intel_context_ring_size(SZ_16K); + ce->ring = __intel_context_ring_size(SZ_4K); + + ce->vm = i915_vm_get(engine->gt->vm); INIT_LIST_HEAD(&ce->signal_link); INIT_LIST_HEAD(&ce->signals); @@ -306,17 +287,11 @@ int intel_context_prepare_remote_request(struct intel_context *ce, int err; /* Only suitable for use in remotely modifying this context */ - GEM_BUG_ON(rq->hw_context == ce); + GEM_BUG_ON(rq->context == ce); if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */ - err = mutex_lock_interruptible_nested(&tl->mutex, - SINGLE_DEPTH_NESTING); - if (err) - return err; - /* Queue this switch after current activity by this context. */ err = i915_active_fence_set(&tl->last_request, rq); - mutex_unlock(&tl->mutex); if (err) return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index dd742ac2fbdb..1d4a1b1357cf 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -7,21 +7,29 @@ #ifndef __INTEL_CONTEXT_H__ #define __INTEL_CONTEXT_H__ +#include <linux/bitops.h> #include <linux/lockdep.h> +#include <linux/types.h> #include "i915_active.h" #include "intel_context_types.h" #include "intel_engine_types.h" +#include "intel_ring_types.h" #include "intel_timeline_types.h" +#define CE_TRACE(ce, fmt, ...) 
do { \ + const struct intel_context *ce__ = (ce); \ + ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \ + ce__->timeline->fence_context, \ + ##__VA_ARGS__); \ +} while (0) + void intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, struct intel_engine_cs *engine); void intel_context_fini(struct intel_context *ce); struct intel_context * -intel_context_create(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); +intel_context_create(struct intel_engine_cs *engine); void intel_context_free(struct intel_context *ce); @@ -152,4 +160,64 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz) return u64_to_ptr(struct intel_ring, sz); } +static inline bool intel_context_is_barrier(const struct intel_context *ce) +{ + return test_bit(CONTEXT_BARRIER_BIT, &ce->flags); +} + +static inline bool intel_context_use_semaphores(const struct intel_context *ce) +{ + return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); +} + +static inline void intel_context_set_use_semaphores(struct intel_context *ce) +{ + set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); +} + +static inline void intel_context_clear_use_semaphores(struct intel_context *ce) +{ + clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); +} + +static inline bool intel_context_is_banned(const struct intel_context *ce) +{ + return test_bit(CONTEXT_BANNED, &ce->flags); +} + +static inline bool intel_context_set_banned(struct intel_context *ce) +{ + return test_and_set_bit(CONTEXT_BANNED, &ce->flags); +} + +static inline bool +intel_context_force_single_submission(const struct intel_context *ce) +{ + return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags); +} + +static inline void +intel_context_set_single_submission(struct intel_context *ce) +{ + __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags); +} + +static inline bool +intel_context_nopreempt(const struct intel_context *ce) +{ + return test_bit(CONTEXT_NOPREEMPT, &ce->flags); +} + +static inline void +intel_context_set_nopreempt(struct intel_context *ce) +{ + set_bit(CONTEXT_NOPREEMPT, &ce->flags); +} + +static inline void +intel_context_clear_nopreempt(struct intel_context *ce) +{ + clear_bit(CONTEXT_NOPREEMPT, &ce->flags); +} + #endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 6959b05ae5f8..9527a659546c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -44,7 +44,7 @@ struct intel_context { #define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2) struct i915_address_space *vm; - struct i915_gem_context *gem_context; + struct i915_gem_context __rcu *gem_context; struct list_head signal_link; struct list_head signals; @@ -54,7 +54,13 @@ struct intel_context { struct intel_timeline *timeline; unsigned long flags; -#define CONTEXT_ALLOC_BIT 0 +#define CONTEXT_BARRIER_BIT 0 +#define CONTEXT_ALLOC_BIT 1 +#define CONTEXT_VALID_BIT 2 +#define CONTEXT_USE_SEMAPHORES 3 +#define CONTEXT_BANNED 4 +#define CONTEXT_FORCE_SINGLE_SUBMISSION 5 +#define CONTEXT_NOPREEMPT 6 u32 *lrc_reg_state; u64 lrc_desc; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 93ea367fe624..79ecac5ac0ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -19,6 +19,7 @@ #include "intel_workarounds.h" struct drm_printer; +struct intel_gt; /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, * but 
keeps the logic simple. Indeed, the whole purpose of this macro is just @@ -28,6 +29,13 @@ struct drm_printer; #define CACHELINE_BYTES 64 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) +#define ENGINE_TRACE(e, fmt, ...) do { \ + const struct intel_engine_cs *e__ __maybe_unused = (e); \ + GEM_TRACE("%s %s: " fmt, \ + dev_name(e__->i915->drm.dev), e__->name, \ + ##__VA_ARGS__); \ +} while (0) + /* * The register defines to be used with the following macros need to accept a * base param, e.g: @@ -89,38 +97,6 @@ struct drm_printer; /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. */ -enum intel_engine_hangcheck_action { - ENGINE_IDLE = 0, - ENGINE_WAIT, - ENGINE_ACTIVE_SEQNO, - ENGINE_ACTIVE_HEAD, - ENGINE_ACTIVE_SUBUNITS, - ENGINE_WAIT_KICK, - ENGINE_DEAD, -}; - -static inline const char * -hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) -{ - switch (a) { - case ENGINE_IDLE: - return "idle"; - case ENGINE_WAIT: - return "wait"; - case ENGINE_ACTIVE_SEQNO: - return "active seqno"; - case ENGINE_ACTIVE_HEAD: - return "active head"; - case ENGINE_ACTIVE_SUBUNITS: - return "active subunits"; - case ENGINE_WAIT_KICK: - return "wait kick"; - case ENGINE_DEAD: - return "dead"; - } - - return "unknown"; -} static inline unsigned int execlists_num_ports(const struct intel_engine_execlists * const execlists) @@ -131,9 +107,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists) static inline struct i915_request * execlists_active(const struct intel_engine_execlists *execlists) { - GEM_BUG_ON(execlists->active - execlists->inflight > - execlists_num_ports(execlists)); - return READ_ONCE(*execlists->active); + return *READ_ONCE(execlists->active); } static inline void @@ -206,132 +180,19 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_HWS_CSB_WRITE_INDEX 0x1f #define CNL_HWS_CSB_WRITE_INDEX 0x2f -struct intel_ring * -intel_engine_create_ring(struct intel_engine_cs *engine, int size); -int intel_ring_pin(struct intel_ring *ring); -void intel_ring_reset(struct intel_ring *ring, u32 tail); -unsigned int intel_ring_update_space(struct intel_ring *ring); -void intel_ring_unpin(struct intel_ring *ring); -void intel_ring_free(struct kref *ref); - -static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) -{ - kref_get(&ring->ref); - return ring; -} - -static inline void intel_ring_put(struct intel_ring *ring) -{ - kref_put(&ring->ref, intel_ring_free); -} - void intel_engine_stop(struct intel_engine_cs *engine); void intel_engine_cleanup(struct intel_engine_cs *engine); -int __must_check intel_ring_cacheline_align(struct i915_request *rq); - -u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); +int intel_engines_init_mmio(struct intel_gt *gt); +int intel_engines_init(struct intel_gt *gt); -static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) -{ - /* Dummy function. - * - * This serves as a placeholder in the code so that the reader - * can compare against the preceding intel_ring_begin() and - * check that the number of dwords emitted matches the space - * reserved for the command packet (i.e. the value passed to - * intel_ring_begin()). 
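The begin/advance contract described in this removed comment is easiest to see from a caller; the helpers appear to be moving into intel_ring.h, which other hunks in this diff now include. A minimal sketch of the usual emission pattern, with MI_NOOP standing in for real commands and error handling abbreviated:

	u32 *cs;

	/* reserve 4 dwords of ring space; this may wait for space to drain */
	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	/* the GEM_BUG_ON just below checks we emitted exactly what we reserved */
	intel_ring_advance(rq, cs);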
- */ - GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); -} - -static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) -{ - return pos & (ring->size - 1); -} - -static inline bool -intel_ring_offset_valid(const struct intel_ring *ring, - unsigned int pos) -{ - if (pos & -ring->size) /* must be strictly within the ring */ - return false; - - if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */ - return false; - - return true; -} - -static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) -{ - /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ - u32 offset = addr - rq->ring->vaddr; - GEM_BUG_ON(offset > rq->ring->size); - return intel_ring_wrap(rq->ring, offset); -} - -static inline void -assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) -{ - GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); - - /* - * "Ring Buffer Use" - * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 - * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5 - * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5 - * "If the Ring Buffer Head Pointer and the Tail Pointer are on the - * same cacheline, the Head Pointer must not be greater than the Tail - * Pointer." - * - * We use ring->head as the last known location of the actual RING_HEAD, - * it may have advanced but in the worst case it is equally the same - * as ring->head and so we should never program RING_TAIL to advance - * into the same cacheline as ring->head. - */ -#define cacheline(a) round_down(a, CACHELINE_BYTES) - GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) && - tail < ring->head); -#undef cacheline -} - -static inline unsigned int -intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) -{ - /* Whilst writes to the tail are strictly order, there is no - * serialisation between readers and the writers. The tail may be - * read by i915_request_retire() just as it is being updated - * by execlists, as although the breadcrumb is complete, the context - * switch hasn't been seen. - */ - assert_ring_tail_valid(ring, tail); - ring->tail = tail; - return tail; -} - -static inline unsigned int -__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) -{ - /* - * "If the Ring Buffer Head Pointer and the Tail Pointer are on the - * same cacheline, the Head Pointer must not be greater than the Tail - * Pointer." 
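The mask arithmetic in the body just below rewards a worked example (numbers purely illustrative): with size = 4096, head = 64 and tail = 192, there are tail - head = 128 bytes in flight, so

	(64 - 192 - 64) & (4096 - 1) = 3904 = 4096 - 128 - 64

i.e. the reported free space is the ring size minus the bytes in use minus one CACHELINE_BYTES of slack, exactly the gap the quoted bspec text requires between tail and head.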
- */ - GEM_BUG_ON(!is_power_of_2(size)); - return (head - tail - CACHELINE_BYTES) & (size - 1); -} - -int intel_engines_init_mmio(struct drm_i915_private *i915); -int intel_engines_setup(struct drm_i915_private *i915); -int intel_engines_init(struct drm_i915_private *i915); -void intel_engines_cleanup(struct drm_i915_private *i915); +void intel_engines_release(struct intel_gt *gt); +void intel_engines_free(struct intel_gt *gt); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); int intel_ring_submission_setup(struct intel_engine_cs *engine); -int intel_ring_submission_init(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); @@ -352,13 +213,11 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); static inline void -intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine) +intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) { irq_work_queue(&engine->breadcrumbs.irq_work); } -void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine); - void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); @@ -416,14 +275,14 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) static inline void __intel_engine_reset(struct intel_engine_cs *engine, bool stalled) { - if (engine->reset.reset) - engine->reset.reset(engine, stalled); + if (engine->reset.rewind) + engine->reset.rewind(engine, stalled); engine->serial++; /* contexts lost */ } bool intel_engines_are_idle(struct intel_gt *gt); bool intel_engine_is_idle(struct intel_engine_cs *engine); -void intel_engine_flush_submission(struct intel_engine_cs *engine); +bool intel_engine_flush_submission(struct intel_engine_cs *engine); void intel_engines_reset_default_submission(struct intel_gt *gt); @@ -434,61 +293,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...); -static inline void intel_engine_context_in(struct intel_engine_cs *engine) -{ - unsigned long flags; - - if (READ_ONCE(engine->stats.enabled) == 0) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - if (engine->stats.active++ == 0) - engine->stats.start = ktime_get(); - GEM_BUG_ON(engine->stats.active == 0); - } - - write_sequnlock_irqrestore(&engine->stats.lock, flags); -} - -static inline void intel_engine_context_out(struct intel_engine_cs *engine) -{ - unsigned long flags; - - if (READ_ONCE(engine->stats.enabled) == 0) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - ktime_t last; - - if (engine->stats.active && --engine->stats.active == 0) { - /* - * Decrement the active context count and in case GPU - * is now idle add up to the running total. - */ - last = ktime_sub(ktime_get(), engine->stats.start); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } else if (engine->stats.active == 0) { - /* - * After turning on engine stats, context out might be - * the first event in which case we account from the - * time stats gathering was turned on. 
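For context on what these accumulators feed: engine->stats.lock is a seqlock, so the consumer takes a consistent busy-time snapshot with a read-retry loop. A hedged sketch of the reader side (the real entry point is intel_engine_get_busy_time(), still declared below; read_busy_time is a made-up name):

	static ktime_t read_busy_time(struct intel_engine_cs *engine)
	{
		unsigned int seq;
		ktime_t total;

		do {
			seq = read_seqbegin(&engine->stats.lock);

			total = engine->stats.total;
			if (engine->stats.active) /* a request is executing now */
				total = ktime_add(total,
						  ktime_sub(ktime_get(),
							    engine->stats.start));
		} while (read_seqretry(&engine->stats.lock, seq));

		return total;
	}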
- */ - last = ktime_sub(ktime_get(), engine->stats.enabled_at); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } - } - - write_sequnlock_irqrestore(&engine->stats.lock, flags); -} - int intel_enable_engine_stats(struct intel_engine_cs *engine); void intel_disable_engine_stats(struct intel_engine_cs *engine); @@ -497,7 +301,7 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine); -u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class); +u32 intel_engine_context_size(struct intel_gt *gt, u8 class); #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) @@ -525,4 +329,22 @@ void intel_engine_init_active(struct intel_engine_cs *engine, #define ENGINE_MOCK 1 #define ENGINE_VIRTUAL 2 +static inline bool +intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return false; + + return intel_engine_has_preemption(engine); +} + +static inline bool +intel_engine_has_timeslices(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return false; + + return intel_engine_has_semaphores(engine); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 051734c9b733..ddf9543b1261 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -28,15 +28,16 @@ #include "i915_drv.h" -#include "gt/intel_gt.h" - +#include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" #include "intel_engine_pool.h" #include "intel_engine_user.h" -#include "intel_context.h" +#include "intel_gt.h" +#include "intel_gt_requests.h" #include "intel_lrc.h" #include "intel_reset.h" +#include "intel_ring.h" /* Haswell does have the CXT_SIZE register however it does not appear to be * valid. Now, docs explain in dwords what is in the context object. The full @@ -140,7 +141,7 @@ static const struct engine_info intel_engines[] = { /** * intel_engine_context_size() - return the size of the context for an engine - * @dev_priv: i915 device private + * @gt: the gt * @class: engine class * * Each engine class may require a different amount of space for a context @@ -152,17 +153,18 @@ static const struct engine_info intel_engines[] = { * in LRC mode, but does not include the "shared data page" used with * GuC submission. The caller should account for this if using the GuC. 
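From the consumer's side, the returned size is rounded up to GTT pages and used to back the context image; roughly (a sketch of what the execlists backend does, error handling elided):

	u32 context_size;
	struct drm_i915_gem_object *obj;

	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);

	/* shmem-backed object holding the logical ring context image */
	obj = i915_gem_object_create_shmem(engine->i915, context_size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);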
*/ -u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) +u32 intel_engine_context_size(struct intel_gt *gt, u8 class) { + struct intel_uncore *uncore = gt->uncore; u32 cxt_size; BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE); switch (class) { case RENDER_CLASS: - switch (INTEL_GEN(dev_priv)) { + switch (INTEL_GEN(gt->i915)) { default: - MISSING_CASE(INTEL_GEN(dev_priv)); + MISSING_CASE(INTEL_GEN(gt->i915)); return DEFAULT_LR_CONTEXT_RENDER_SIZE; case 12: case 11: @@ -174,14 +176,14 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) case 8: return GEN8_LR_CONTEXT_RENDER_SIZE; case 7: - if (IS_HASWELL(dev_priv)) + if (IS_HASWELL(gt->i915)) return HSW_CXT_TOTAL_SIZE; - cxt_size = I915_READ(GEN7_CXT_SIZE); + cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE); return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64, PAGE_SIZE); case 6: - cxt_size = I915_READ(CXT_SIZE); + cxt_size = intel_uncore_read(uncore, CXT_SIZE); return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64, PAGE_SIZE); case 5: @@ -196,9 +198,9 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) * minimum allocation anyway so it should all come * out in the wash. */ - cxt_size = I915_READ(CXT_SIZE) + 1; + cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1; DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n", - INTEL_GEN(dev_priv), + INTEL_GEN(gt->i915), cxt_size * 64, cxt_size - 1); return round_up(cxt_size * 64, PAGE_SIZE); @@ -215,7 +217,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) case VIDEO_DECODE_CLASS: case VIDEO_ENHANCEMENT_CLASS: case COPY_ENGINE_CLASS: - if (INTEL_GEN(dev_priv) < 8) + if (INTEL_GEN(gt->i915) < 8) return 0; return GEN8_LR_CONTEXT_OTHER_SIZE; } @@ -308,14 +310,16 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->instance = info->instance; __sprint_engine_name(engine); - /* - * To be overridden by the backend on setup. However to facilitate - * cleanup on error during setup, we always provide the destroy vfunc. 
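This kfree-as-destroy default is dropped below in favour of a two-phase teardown, implemented by the release/free pair added in this patch; a sketch of the resulting unload ordering:

	intel_engines_release(gt);	/* unhook backends; structs stay for late resets */
	/* ... a final wedge/reset may still walk the engine layout here ... */
	intel_engines_free(gt);		/* only now kfree the engines themselves */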
- */ - engine->destroy = (typeof(engine->destroy))kfree; + engine->props.heartbeat_interval_ms = + CONFIG_DRM_I915_HEARTBEAT_INTERVAL; + engine->props.preempt_timeout_ms = + CONFIG_DRM_I915_PREEMPT_TIMEOUT; + engine->props.stop_timeout_ms = + CONFIG_DRM_I915_STOP_TIMEOUT; + engine->props.timeslice_duration_ms = + CONFIG_DRM_I915_TIMESLICE_DURATION; - engine->context_size = intel_engine_context_size(gt->i915, - engine->class); + engine->context_size = intel_engine_context_size(gt, engine->class); if (WARN_ON(engine->context_size > BIT(20))) engine->context_size = 0; if (engine->context_size) @@ -324,6 +328,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL; + ewma__engine_latency_init(&engine->latency); seqlock_init(&engine->stats.lock); ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); @@ -334,7 +339,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) gt->engine_class[info->class][info->instance] = engine; gt->engine[id] = engine; - intel_engine_add_user(engine); gt->i915->engine[id] = engine; return 0; @@ -370,38 +374,58 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) } } -static void intel_setup_engine_capabilities(struct drm_i915_private *i915) +static void intel_setup_engine_capabilities(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) __setup_engine_capabilities(engine); } /** - * intel_engines_cleanup() - free the resources allocated for Command Streamers - * @i915: the i915 devic + * intel_engines_release() - free the resources allocated for Command Streamers + * @gt: pointer to struct intel_gt */ -void intel_engines_cleanup(struct drm_i915_private *i915) +void intel_engines_release(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { - engine->destroy(engine); - i915->engine[id] = NULL; + /* Decouple the backend; but keep the layout for late GPU resets */ + for_each_engine(engine, gt, id) { + if (!engine->release) + continue; + + engine->release(engine); + engine->release = NULL; + + memset(&engine->reset, 0, sizeof(engine->reset)); + + gt->i915->engine[id] = NULL; + } +} + +void intel_engines_free(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + kfree(engine); + gt->engine[id] = NULL; } } /** * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers - * @i915: the i915 device + * @gt: pointer to struct intel_gt * * Return: non-zero if the initialization failed. 
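In terms of probe ordering, the two entry points end up roughly as follows (a sketch with error unwinding elided; both take the gt after this patch):

	/* early, during MMIO probe: discover engines from the fuse masks */
	err = intel_engines_init_mmio(gt);
	if (err)
		return err;

	/* ...later, once the submission backend (execlists vs. ring) is known: */
	err = intel_engines_init(gt);
	if (err)
		return err;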
*/ -int intel_engines_init_mmio(struct drm_i915_private *i915) +int intel_engines_init_mmio(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; struct intel_device_info *device_info = mkwrite_device_info(i915); const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask; unsigned int mask = 0; @@ -419,7 +443,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) if (!HAS_ENGINE(i915, i)) continue; - err = intel_engine_setup(&i915->gt, i); + err = intel_engine_setup(gt, i); if (err) goto cleanup; @@ -436,45 +460,14 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) RUNTIME_INFO(i915)->num_engines = hweight32(mask); - intel_gt_check_and_clear_faults(&i915->gt); + intel_gt_check_and_clear_faults(gt); - intel_setup_engine_capabilities(i915); + intel_setup_engine_capabilities(gt); return 0; cleanup: - intel_engines_cleanup(i915); - return err; -} - -/** - * intel_engines_init() - init the Engine Command Streamers - * @i915: i915 device private - * - * Return: non-zero if the initialization failed. - */ -int intel_engines_init(struct drm_i915_private *i915) -{ - int (*init)(struct intel_engine_cs *engine); - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err; - - if (HAS_EXECLISTS(i915)) - init = intel_execlists_submission_init; - else - init = intel_ring_submission_init; - - for_each_engine(engine, i915, id) { - err = init(engine); - if (err) - goto cleanup; - } - - return 0; - -cleanup: - intel_engines_cleanup(i915); + intel_engines_free(gt); return err; } @@ -518,7 +511,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine, unsigned int flags; flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) + if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) /* * On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. @@ -589,7 +582,7 @@ err: return ret; } -static int intel_engine_setup_common(struct intel_engine_cs *engine) +static int engine_setup_common(struct intel_engine_cs *engine) { int err; @@ -602,9 +595,9 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_active(engine, ENGINE_PHYSICAL); intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); - intel_engine_init_hangcheck(engine); intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); + intel_engine_init_retire(engine); intel_engine_pool_init(&engine->pool); @@ -619,49 +612,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) return 0; } -/** - * intel_engines_setup- setup engine state not requiring hw access - * @i915: Device to setup. - * - * Initializes engine structure members shared between legacy and execlists - * submission modes which do not require hardware access. - * - * Typically done early in the submission mode specific engine setup stage. 
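/*
 * Editorial aside (not part of the patch): intel_engines_init_mmio() above
 * trims the compile-time engine mask down to the engines that actually
 * probed, then counts them with hweight32(). In standalone C the same
 * bookkeeping is a mask intersection plus a population count; both mask
 * values here are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t claimed = 0x2f; /* hypothetical mask from the device info */
	uint32_t probed = 0x27;  /* engines whose setup succeeded */

	uint32_t mask = claimed & probed;

	printf("num_engines = %d\n", __builtin_popcount(mask));
	return 0;
}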
- */ -int intel_engines_setup(struct drm_i915_private *i915) -{ - int (*setup)(struct intel_engine_cs *engine); - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err; - - if (HAS_EXECLISTS(i915)) - setup = intel_execlists_submission_setup; - else - setup = intel_ring_submission_setup; - - for_each_engine(engine, i915, id) { - err = intel_engine_setup_common(engine); - if (err) - goto cleanup; - - err = setup(engine); - if (err) - goto cleanup; - - /* We expect the backend to take control over its state */ - GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree); - - GEM_BUG_ON(!engine->cops); - } - - return 0; - -cleanup: - intel_engines_cleanup(i915); - return err; -} - struct measure_breadcrumb { struct i915_request rq; struct intel_timeline timeline; @@ -745,13 +695,13 @@ create_kernel_context(struct intel_engine_cs *engine) struct intel_context *ce; int err; - ce = intel_context_create(engine->i915->kernel_context, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return ce; - ce->ring = __intel_context_ring_size(SZ_4K); + __set_bit(CONTEXT_BARRIER_BIT, &ce->flags); - err = intel_context_pin(ce); + err = intel_context_pin(ce); /* perma-pin so it is always available */ if (err) { intel_context_put(ce); return ERR_PTR(err); @@ -779,13 +729,19 @@ create_kernel_context(struct intel_engine_cs *engine) * * Returns zero on success or an error code on failure. */ -int intel_engine_init_common(struct intel_engine_cs *engine) +static int engine_init_common(struct intel_engine_cs *engine) { struct intel_context *ce; int ret; engine->set_default_submission(engine); + ret = measure_breadcrumb_dw(engine); + if (ret < 0) + return ret; + + engine->emit_fini_breadcrumb_dw = ret; + /* * We may need to do things with the shrinker which * require us to immediately switch back to the default @@ -800,18 +756,38 @@ int intel_engine_init_common(struct intel_engine_cs *engine) engine->kernel_context = ce; - ret = measure_breadcrumb_dw(engine); - if (ret < 0) - goto err_unpin; + return 0; +} - engine->emit_fini_breadcrumb_dw = ret; +int intel_engines_init(struct intel_gt *gt) +{ + int (*setup)(struct intel_engine_cs *engine); + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; - return 0; + if (HAS_EXECLISTS(gt->i915)) + setup = intel_execlists_submission_setup; + else + setup = intel_ring_submission_setup; -err_unpin: - intel_context_unpin(ce); - intel_context_put(ce); - return ret; + for_each_engine(engine, gt, id) { + err = engine_setup_common(engine); + if (err) + return err; + + err = setup(engine); + if (err) + return err; + + err = engine_init_common(engine); + if (err) + return err; + + intel_engine_add_user(engine); + } + + return 0; } /** @@ -824,9 +800,11 @@ err_unpin: void intel_engine_cleanup_common(struct intel_engine_cs *engine) { GEM_BUG_ON(!list_empty(&engine->active.requests)); + tasklet_kill(&engine->execlists.tasklet); /* flush the callback */ cleanup_status_page(engine); + intel_engine_fini_retire(engine); intel_engine_pool_fini(&engine->pool); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); @@ -873,6 +851,21 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) return bbaddr; } +static unsigned long stop_timeout(const struct intel_engine_cs *engine) +{ + if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */ + return 0; + + /* + * If we are doing a normal GPU reset, we can take our time and allow + * the engine to quiesce. 
We've stopped submission to the engine, and + * if we wait long enough an innocent context should complete and + * leave the engine idle. So they should not be caught unaware by + * the forthcoming GPU reset (which usually follows the stop_cs)! + */ + return READ_ONCE(engine->props.stop_timeout_ms); +} + int intel_engine_stop_cs(struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; @@ -883,16 +876,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) if (INTEL_GEN(engine->i915) < 3) return -ENODEV; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); err = 0; if (__intel_wait_for_register_fw(uncore, mode, MODE_IDLE, MODE_IDLE, - 1000, 0, + 1000, stop_timeout(engine), NULL)) { - GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name); + ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n"); err = -ETIMEDOUT; } @@ -904,7 +897,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) { - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -1054,9 +1047,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return idle; } -void intel_engine_flush_submission(struct intel_engine_cs *engine) +bool intel_engine_flush_submission(struct intel_engine_cs *engine) { struct tasklet_struct *t = &engine->execlists.tasklet; + bool active = tasklet_is_locked(t); if (__tasklet_is_scheduled(t)) { local_bh_disable(); @@ -1067,10 +1061,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine) tasklet_unlock(t); } local_bh_enable(); + active = true; } /* Otherwise flush the tasklet if it was running on another cpu */ tasklet_unlock_wait(t); + + return active; } /** @@ -1318,10 +1315,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, unsigned int idx; u8 read, write; - drm_printf(m, "\tExeclist tasklet queued? %s (%s), timeslice? %s\n", + drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n", yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count)), + repr_timer(&engine->execlists.preempt), repr_timer(&engine->execlists.timer)); read = execlists->csb_head; @@ -1345,6 +1343,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } execlists_active_lock_bh(execlists); + rcu_read_lock(); for (port = execlists->active; (rq = *port); port++) { char hdr[80]; int len; @@ -1382,6 +1381,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, if (tl) intel_timeline_put(tl); } + rcu_read_unlock(); execlists_active_unlock_bh(execlists); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", @@ -1447,8 +1447,17 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "*** WEDGED ***\n"); drm_printf(m, "\tAwake? 
%d\n", atomic_read(&engine->wakeref.count)); - drm_printf(m, "\tHangcheck: %d ms ago\n", - jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); + drm_printf(m, "\tBarriers?: %s\n", + yesno(!llist_empty(&engine->barrier_tasks))); + drm_printf(m, "\tLatency: %luus\n", + ewma__engine_latency_read(&engine->latency)); + + rcu_read_lock(); + rq = READ_ONCE(engine->heartbeat.systole); + if (rq) + drm_printf(m, "\tHeartbeat: %d ms ago\n", + jiffies_to_msecs(jiffies - rq->emitted_jiffies)); + rcu_read_unlock(); drm_printf(m, "\tReset count: %d (global %d)\n", i915_reset_engine_count(error, engine), i915_reset_count(error)); @@ -1481,9 +1490,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, print_request_ring(m, rq); - if (rq->hw_context->lrc_reg_state) { + if (rq->context->lrc_reg_state) { drm_printf(m, "Logical Ring Context:\n"); - hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE); + hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE); } } spin_unlock_irqrestore(&engine->active.lock, flags); @@ -1544,7 +1553,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) for (port = execlists->pending; (rq = *port); port++) { /* Exclude any contexts already counted in active */ - if (!intel_context_inflight_count(rq->hw_context)) + if (!intel_context_inflight_count(rq->context)) engine->stats.active++; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c new file mode 100644 index 000000000000..742628e40201 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -0,0 +1,242 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_request.h" + +#include "intel_context.h" +#include "intel_engine_heartbeat.h" +#include "intel_engine_pm.h" +#include "intel_engine.h" +#include "intel_gt.h" +#include "intel_reset.h" + +/* + * While the engine is active, we send a periodic pulse along the engine + * to check on its health and to flush any idle-barriers. If that request + * is stuck, and we fail to preempt it, we declare the engine hung and + * issue a reset -- in the hope that restores progress. 
+ */ + +static bool next_heartbeat(struct intel_engine_cs *engine) +{ + long delay; + + delay = READ_ONCE(engine->props.heartbeat_interval_ms); + if (!delay) + return false; + + delay = msecs_to_jiffies_timeout(delay); + if (delay >= HZ) + delay = round_jiffies_up_relative(delay); + schedule_delayed_work(&engine->heartbeat.work, delay); + + return true; +} + +static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) +{ + engine->wakeref_serial = READ_ONCE(engine->serial) + 1; + i915_request_add_active_barriers(rq); +} + +static void show_heartbeat(const struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct drm_printer p = drm_debug_printer("heartbeat"); + + intel_engine_dump(engine, &p, + "%s heartbeat {prio:%d} not ticking\n", + engine->name, + rq->sched.attr.priority); +} + +static void heartbeat(struct work_struct *wrk) +{ + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN), + }; + struct intel_engine_cs *engine = + container_of(wrk, typeof(*engine), heartbeat.work.work); + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + + rq = engine->heartbeat.systole; + if (rq && i915_request_completed(rq)) { + i915_request_put(rq); + engine->heartbeat.systole = NULL; + } + + if (!intel_engine_pm_get_if_awake(engine)) + return; + + if (intel_gt_is_wedged(engine->gt)) + goto out; + + if (engine->heartbeat.systole) { + if (engine->schedule && + rq->sched.attr.priority < I915_PRIORITY_BARRIER) { + /* + * Gradually raise the priority of the heartbeat to + * give high priority work [which presumably desires + * low latency and no jitter] the chance to naturally + * complete before being preempted. + */ + attr.priority = I915_PRIORITY_MASK; + if (rq->sched.attr.priority >= attr.priority) + attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT); + if (rq->sched.attr.priority >= attr.priority) + attr.priority = I915_PRIORITY_BARRIER; + + local_bh_disable(); + engine->schedule(rq, &attr); + local_bh_enable(); + } else { + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + show_heartbeat(rq, engine); + + intel_gt_handle_error(engine->gt, engine->mask, + I915_ERROR_CAPTURE, + "stopped heartbeat on %s", + engine->name); + } + goto out; + } + + if (engine->wakeref_serial == engine->serial) + goto out; + + mutex_lock(&ce->timeline->mutex); + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) + goto unlock; + + idle_pulse(engine, rq); + if (i915_modparams.enable_hangcheck) + engine->heartbeat.systole = i915_request_get(rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + +unlock: + mutex_unlock(&ce->timeline->mutex); +out: + if (!next_heartbeat(engine)) + i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); + intel_engine_pm_put(engine); +} + +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + return; + + next_heartbeat(engine); +} + +void intel_engine_park_heartbeat(struct intel_engine_cs *engine) +{ + if (cancel_delayed_work(&engine->heartbeat.work)) + i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); +} + +void intel_engine_init_heartbeat(struct intel_engine_cs *engine) +{ + INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat); +} + +int intel_engine_set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay) +{ + int err; + + /* Send one last pulse before to cleanup persistent hogs */ + if (!delay && 
IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) { + err = intel_engine_pulse(engine); + if (err) + return err; + } + + WRITE_ONCE(engine->props.heartbeat_interval_ms, delay); + + if (intel_engine_pm_get_if_awake(engine)) { + if (delay) + intel_engine_unpark_heartbeat(engine); + else + intel_engine_park_heartbeat(engine); + intel_engine_pm_put(engine); + } + + return 0; +} + +int intel_engine_pulse(struct intel_engine_cs *engine) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + int err = 0; + + if (!intel_engine_has_preemption(engine)) + return -ENODEV; + + if (!intel_engine_pm_get_if_awake(engine)) + return 0; + + if (mutex_lock_interruptible(&ce->timeline->mutex)) + goto out_rpm; + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unlock; + } + + rq->flags |= I915_REQUEST_SENTINEL; + idle_pulse(engine, rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + +out_unlock: + mutex_unlock(&ce->timeline->mutex); +out_rpm: + intel_engine_pm_put(engine); + return err; +} + +int intel_engine_flush_barriers(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + int err = 0; + + if (llist_empty(&engine->barrier_tasks)) + return 0; + + if (!intel_engine_pm_get_if_awake(engine)) + return 0; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_rpm; + } + + idle_pulse(engine, rq); + i915_request_add(rq); + +out_rpm: + intel_engine_pm_put(engine); + return err; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_engine_heartbeat.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h new file mode 100644 index 000000000000..a7b8c0f9e005 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_ENGINE_HEARTBEAT_H +#define INTEL_ENGINE_HEARTBEAT_H + +struct intel_engine_cs; + +void intel_engine_init_heartbeat(struct intel_engine_cs *engine); + +int intel_engine_set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay); + +void intel_engine_park_heartbeat(struct intel_engine_cs *engine); +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine); + +int intel_engine_pulse(struct intel_engine_cs *engine); +int intel_engine_flush_barriers(struct intel_engine_cs *engine); + +#endif /* INTEL_ENGINE_HEARTBEAT_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 67eb6183648a..010620b78202 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -6,12 +6,15 @@ #include "i915_drv.h" +#include "intel_context.h" #include "intel_engine.h" +#include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "intel_engine_pool.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_rc6.h" +#include "intel_ring.h" static int __engine_unpark(struct intel_wakeref *wf) { @@ -19,7 +22,7 @@ static int __engine_unpark(struct intel_wakeref *wf) container_of(wf, typeof(*engine), wakeref); void *map; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); intel_gt_pm_get(engine->gt); @@ -34,7 +37,7 @@ static int __engine_unpark(struct intel_wakeref *wf) if (engine->unpark) 
engine->unpark(engine); - intel_engine_init_hangcheck(engine); + intel_engine_unpark_heartbeat(engine); return 0; } @@ -53,7 +56,7 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce) static inline void __timeline_mark_unlock(struct intel_context *ce, unsigned long flags) { - mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_); + mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_); local_irq_restore(flags); } @@ -71,12 +74,57 @@ static inline void __timeline_mark_unlock(struct intel_context *ce, #endif /* !IS_ENABLED(CONFIG_LOCKDEP) */ +static void duration(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct i915_request *rq = to_request(fence); + + ewma__engine_latency_add(&rq->engine->latency, + ktime_us_delta(rq->fence.timestamp, + rq->duration.emitted)); +} + +static void +__queue_and_release_pm(struct i915_request *rq, + struct intel_timeline *tl, + struct intel_engine_cs *engine) +{ + struct intel_gt_timelines *timelines = &engine->gt->timelines; + + ENGINE_TRACE(engine, "\n"); + + /* + * We have to serialise all potential retirement paths with our + * submission, as we don't want to underflow either the + * engine->wakeref.counter or our timeline->active_count. + * + * Equally, we cannot allow a new submission to start until + * after we finish queueing, nor could we allow that submitter + * to retire us before we are ready! + */ + spin_lock(&timelines->lock); + + /* Let intel_gt_retire_requests() retire us (acquired under lock) */ + if (!atomic_fetch_inc(&tl->active_count)) + list_add_tail(&tl->link, &timelines->active_list); + + /* Hand the request over to HW and so engine_retire() */ + __i915_request_queue(rq, NULL); + + /* Let new submissions commence (and maybe retire this timeline) */ + __intel_wakeref_defer_park(&engine->wakeref); + + spin_unlock(&timelines->lock); } + static bool switch_to_kernel_context(struct intel_engine_cs *engine) { + struct intel_context *ce = engine->kernel_context; struct i915_request *rq; unsigned long flags; bool result = true; + GEM_BUG_ON(!intel_context_is_barrier(ce)); + /* Already inside the kernel context, safe to power down. */ if (engine->wakeref_serial == engine->serial) return true; @@ -96,31 +144,56 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) * This should hold true as we can only park the engine after * retiring the last request, thus all rings should be empty and * all timelines idle. + * + * For unlocking, there are 2 other parties and the GPU who have a + * stake here. + * + * A new gpu user will be waiting on the engine-pm to start their + * engine_unpark. New waiters are predicated on engine->wakeref.count + * and so intel_wakeref_defer_park() acts like a mutex_unlock of the + * engine->wakeref. + * + * The other party is intel_gt_retire_requests(), which is walking the + * list of active timelines looking for completions. Meanwhile as soon + * as we call __i915_request_queue(), the GPU may complete our request. + * Ergo, if we put ourselves on the timelines.active_list + * (see intel_timeline_enter()) before we increment the + * engine->wakeref.count, we may see the request completion and retire + * it causing an underflow of the engine->wakeref.
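/*
 * Editorial aside (not part of the patch): the atomic_fetch_inc() above
 * pairs with atomic_dec_and_test() in intel_gt_retire_requests_timeout()
 * so that an element is linked only on the 0 -> 1 transition and unlinked
 * only on the final 1 -> 0 transition. The counting rule, restated as a
 * standalone C11 sketch with illustrative names:
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool example_enter(atomic_int *active_count)
{
	/* true on the first enter: the caller must link the element */
	return atomic_fetch_add(active_count, 1) == 0;
}

static bool example_exit(atomic_int *active_count)
{
	/* true on the last exit: the caller must unlink the element */
	return atomic_fetch_sub(active_count, 1) == 1;
}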
*/ - flags = __timeline_mark_lock(engine->kernel_context); + flags = __timeline_mark_lock(ce); + GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0); - rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT); + rq = __i915_request_create(ce, GFP_NOWAIT); if (IS_ERR(rq)) /* Context switch failed, hope for the best! Maybe reset? */ goto out_unlock; - intel_timeline_enter(i915_request_timeline(rq)); - /* Check again on the next retirement. */ engine->wakeref_serial = engine->serial + 1; i915_request_add_active_barriers(rq); /* Install ourselves as a preemption barrier */ - rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE; - __i915_request_commit(rq); + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */ + /* + * Use an interrupt for precise measurement of duration, + * otherwise we rely on someone else retiring all the requests + * which may delay the signaling (i.e. we will likely wait + * until the background request retirement running every + * second or two). + */ + BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq)); + dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration); + rq->duration.emitted = ktime_get(); + } - /* Release our exclusive hold on the engine */ - __intel_wakeref_defer_park(&engine->wakeref); - __i915_request_queue(rq, NULL); + /* Expose ourselves to the world */ + __queue_and_release_pm(rq, ce->timeline, engine); result = false; out_unlock: - __timeline_mark_unlock(engine->kernel_context, flags); + __timeline_mark_unlock(ce, flags); return result; } @@ -133,7 +206,7 @@ static void call_idle_barriers(struct intel_engine_cs *engine) container_of((struct list_head *)node, typeof(*cb), node); - cb->func(NULL, cb); + cb->func(ERR_PTR(-EAGAIN), cb); } } @@ -154,10 +227,11 @@ static int __engine_park(struct intel_wakeref *wf) if (!switch_to_kernel_context(engine)) return -EBUSY; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); call_idle_barriers(engine); /* cleanup after wedging */ + intel_engine_park_heartbeat(engine); intel_engine_disarm_breadcrumbs(engine); intel_engine_pool_park(&engine->pool); @@ -174,7 +248,8 @@ static int __engine_park(struct intel_wakeref *wf) engine->execlists.no_priolist = false; - intel_gt_pm_put(engine->gt); + /* While gt calls i915_vma_parked(), we have to break the lock cycle */ + intel_gt_pm_put_async(engine->gt); return 0; } @@ -188,6 +263,7 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) struct intel_runtime_pm *rpm = engine->uncore->rpm; intel_wakeref_init(&engine->wakeref, rpm, &wf_ops); + intel_engine_init_heartbeat(engine); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index 739c50fefcef..e52c2b0cb245 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -7,6 +7,7 @@ #ifndef INTEL_ENGINE_PM_H #define INTEL_ENGINE_PM_H +#include "i915_request.h" #include "intel_engine_types.h" #include "intel_wakeref.h" @@ -31,6 +32,36 @@ static inline void intel_engine_pm_put(struct intel_engine_cs *engine) intel_wakeref_put(&engine->wakeref); } +static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine) +{ + intel_wakeref_put_async(&engine->wakeref); +} + +static inline void intel_engine_pm_flush(struct intel_engine_cs *engine) +{ + intel_wakeref_unlock_wait(&engine->wakeref); +} + +static inline struct i915_request * +intel_engine_create_kernel_request(struct 
intel_engine_cs *engine) +{ + struct i915_request *rq; + + /* + * The engine->kernel_context is special as it is used inside + * the engine-pm barrier (see __engine_park()), circumventing + * the usual mutexes and relying on the engine-pm barrier + * instead. So whenever we use the engine->kernel_context + * outside of the barrier, we must manually handle the + * engine wakeref to serialise with the use inside. + */ + intel_engine_pm_get(engine); + rq = i915_request_create(engine->kernel_context); + intel_engine_pm_put(engine); + + return rq; +} + void intel_engine_init__pm(struct intel_engine_cs *engine); #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c index 3cdbd5f8b5be..397186818305 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c @@ -104,6 +104,8 @@ node_create(struct intel_engine_pool *pool, size_t sz) return ERR_CAST(obj); } + i915_gem_object_set_readonly(obj); + node->obj = obj; return node; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 3451be034caf..00287515e7af 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -7,6 +7,7 @@ #ifndef __INTEL_ENGINE_TYPES__ #define __INTEL_ENGINE_TYPES__ +#include <linux/average.h> #include <linux/hashtable.h> #include <linux/irq_work.h> #include <linux/kref.h> @@ -15,6 +16,7 @@ #include <linux/rbtree.h> #include <linux/timer.h> #include <linux/types.h> +#include <linux/workqueue.h> #include "i915_gem.h" #include "i915_pmu.h" @@ -58,6 +60,7 @@ struct i915_gem_context; struct i915_request; struct i915_sched_attr; struct intel_gt; +struct intel_ring; struct intel_uncore; typedef u8 intel_engine_mask_t; @@ -76,40 +79,6 @@ struct intel_instdone { u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; }; -struct intel_engine_hangcheck { - u64 acthd; - u32 last_ring; - u32 last_head; - unsigned long action_timestamp; - struct intel_instdone instdone; -}; - -struct intel_ring { - struct kref ref; - struct i915_vma *vma; - void *vaddr; - - /* - * As we have two types of rings, one global to the engine used - * by ringbuffer submission and those that are exclusive to a - * context used by execlists, we have to play safe and allow - * atomic updates to the pin_count. However, the actual pinning - * of the context is either done during initialisation for - * ringbuffer submission or serialised as part of the context - * pinning for execlists, and so we do not need a mutex ourselves - * to serialise intel_ring_pin/intel_ring_unpin. 
- */ - atomic_t pin_count; - - u32 head; - u32 tail; - u32 emit; - - u32 space; - u32 size; - u32 effective_size; -}; - /* * we use a single page to load ctx workarounds so all of these * values are referred in terms of dwords @@ -151,6 +120,9 @@ enum intel_engine_id { #define INVALID_ENGINE ((enum intel_engine_id)-1) }; +/* A simple estimator for the round-trip latency of an engine */ +DECLARE_EWMA(_engine_latency, 6, 4) + struct st_preempt_hang { struct completion completion; unsigned int count; @@ -175,6 +147,11 @@ struct intel_engine_execlists { struct timer_list timer; /** + * @preempt: reset the current context if it fails to give way + */ + struct timer_list preempt; + + /** * @default_priolist: priority list for I915_PRIORITY_NORMAL */ struct i915_priolist default_priolist; @@ -326,6 +303,11 @@ struct intel_engine_cs { intel_engine_mask_t saturated; /* submitting semaphores too late? */ + struct { + struct delayed_work work; + struct i915_request *systole; + } heartbeat; + unsigned long serial; unsigned long wakeref_serial; @@ -338,6 +320,13 @@ struct intel_engine_cs { struct intel_timeline *timeline; } legacy; + /* + * We track the average duration of the idle pulse on parking the + * engine to keep an estimate of how fast the engine is + * under ideal conditions. + */ + struct ewma__engine_latency latency; + /* Rather than have every client wait upon all user interrupts, * with the herd waking after every interrupt and each doing the * heavyweight seqno dance, we delegate the task (of being the @@ -411,7 +400,10 @@ struct intel_engine_cs { struct { void (*prepare)(struct intel_engine_cs *engine); - void (*reset)(struct intel_engine_cs *engine, bool stalled); + + void (*rewind)(struct intel_engine_cs *engine, bool stalled); + void (*cancel)(struct intel_engine_cs *engine); + void (*finish)(struct intel_engine_cs *engine); } reset; @@ -461,30 +453,29 @@ struct intel_engine_cs { void (*schedule)(struct i915_request *request, const struct i915_sched_attr *attr); - /* - * Cancel all requests on the hardware, or queued for execution. - * This should only cancel the ready requests that have been - * submitted to the engine (via the engine->submit_request callback). - * This is called when marking the device as wedged. - */ - void (*cancel_requests)(struct intel_engine_cs *engine); - - void (*destroy)(struct intel_engine_cs *engine); + void (*release)(struct intel_engine_cs *engine); struct intel_engine_execlists execlists; + /* + * Keep track of completed timelines on this engine for early + * retirement with the goal of quickly enabling powersaving as + * soon as the engine is idle.
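/*
 * Editorial aside (not part of the patch): DECLARE_EWMA(_engine_latency,
 * 6, 4) above expands to a fixed-point exponentially weighted moving
 * average with 6 fractional bits, where each new sample carries a 1/4
 * weight. The same arithmetic in standalone C, fed with made-up samples:
 */
#include <stdio.h>

int main(void)
{
	unsigned long avg = 0;    /* internal fixed-point state */
	const unsigned prec = 6;  /* fractional bits */
	const unsigned shift = 2; /* log2 of the 1/4 sample weight */
	const unsigned long samples[] = { 80, 120, 100, 90 }; /* e.g. us */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long val = samples[i] << prec;

		avg = avg ? (((avg << shift) - avg) + val) >> shift : val;
		printf("sample %lu -> average %lu\n", samples[i], avg >> prec);
	}
	return 0;
}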
+ */ + struct intel_timeline *retire; + struct work_struct retire_work; + /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; - struct intel_engine_hangcheck hangcheck; - -#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) +#define I915_ENGINE_USING_CMD_PARSER BIT(0) #define I915_ENGINE_SUPPORTS_STATS BIT(1) #define I915_ENGINE_HAS_PREEMPTION BIT(2) #define I915_ENGINE_HAS_SEMAPHORES BIT(3) #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) #define I915_ENGINE_IS_VIRTUAL BIT(5) #define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7) unsigned int flags; /* @@ -542,12 +533,25 @@ struct intel_engine_cs { */ ktime_t total; } stats; + + struct { + unsigned long heartbeat_interval_ms; + unsigned long preempt_timeout_ms; + unsigned long stop_timeout_ms; + unsigned long timeslice_duration_ms; + } props; }; static inline bool -intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_USING_CMD_PARSER; +} + +static inline bool +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine) { - return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER; } static inline bool diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 4294f146f13c..51b8718513bc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -7,6 +7,8 @@ #ifndef _INTEL_GPU_COMMANDS_H_ #define _INTEL_GPU_COMMANDS_H_ +#include <linux/bitops.h> + /* * Target address alignments required for GPU access e.g. * MI_STORE_DWORD_IMM. @@ -319,4 +321,31 @@ #define COLOR_BLT ((0x2<<29)|(0x40<<22)) #define SRC_COPY_BLT ((0x2<<29)|(0x43<<22)) +/* + * Used to convert any address to canonical form. + * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS, + * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the + * addresses to be in a canonical form: + * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct + * canonical form [63:48] == [47]." 
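/*
 * Editorial aside (not part of the patch): the helpers that follow
 * implement the rule quoted above with sign_extend64()/GENMASK_ULL().
 * A runnable userspace equivalent -- replicate bit 47 into bits [63:48]
 * and mask it back off -- with a made-up sample address:
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HIGH_BIT 47

static uint64_t example_canonical(uint64_t addr)
{
	/* arithmetic shift copies bit 47 into the upper 16 bits */
	return (uint64_t)((int64_t)(addr << (63 - EXAMPLE_HIGH_BIT)) >>
			  (63 - EXAMPLE_HIGH_BIT));
}

static uint64_t example_noncanonical(uint64_t addr)
{
	return addr & ((UINT64_C(1) << (EXAMPLE_HIGH_BIT + 1)) - 1);
}

int main(void)
{
	uint64_t addr = UINT64_C(0x0000800000001000); /* bit 47 set */

	printf("canonical:    0x%016" PRIx64 "\n", example_canonical(addr));
	printf("noncanonical: 0x%016" PRIx64 "\n",
	       example_noncanonical(example_canonical(addr)));
	return 0;
}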
+ */ +#define GEN8_HIGH_ADDRESS_BIT 47 +static inline u64 gen8_canonical_addr(u64 address) +{ + return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT); +} + +static inline u64 gen8_noncanonical_addr(u64 address) +{ + return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); +} + +static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags) +{ + *cs++ = MI_BATCH_BUFFER_START | flags; + *cs++ = addr; + + return cs; +} + #endif /* _INTEL_GPU_COMMANDS_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 1c4b6c9642ad..ec84b5e62fef 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -3,12 +3,16 @@ * Copyright © 2019 Intel Corporation */ +#include "debugfs_gt.h" #include "i915_drv.h" +#include "intel_context.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" #include "intel_mocs.h" #include "intel_rc6.h" +#include "intel_renderstate.h" +#include "intel_rps.h" #include "intel_uncore.h" #include "intel_pm.h" @@ -22,19 +26,20 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) INIT_LIST_HEAD(&gt->closed_vma); spin_lock_init(&gt->closed_lock); - intel_gt_init_hangcheck(gt); intel_gt_init_reset(gt); intel_gt_init_requests(gt); + intel_gt_init_timelines(gt); intel_gt_pm_init_early(gt); + + intel_rps_init_early(&gt->rps); intel_uc_init_early(&gt->uc); } -void intel_gt_init_hw_early(struct drm_i915_private *i915) +void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) { - i915->gt.ggtt = &i915->ggtt; + gt->ggtt = ggtt; - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_gt_pm_disable(&i915->gt); + intel_gt_sanitize(gt, false); } static void init_unused_ring(struct intel_gt *gt, u32 base) @@ -72,7 +77,6 @@ int intel_gt_init_hw(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; int ret; - BUG_ON(!i915->kernel_context); ret = intel_gt_terminally_wedged(gt); if (ret) return ret; @@ -302,7 +306,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt) intel_gt_chipset_flush(gt); - with_intel_runtime_pm(uncore->rpm, wakeref) { + with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) { unsigned long flags; spin_lock_irqsave(&uncore->lock, flags); @@ -321,8 +325,9 @@ void intel_gt_chipset_flush(struct intel_gt *gt) void intel_gt_driver_register(struct intel_gt *gt) { - if (IS_GEN(gt->i915, 5)) - intel_gpu_ips_init(gt->i915); + intel_rps_driver_register(&gt->rps); + + debugfs_gt_register(gt); } static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) @@ -364,41 +369,297 @@ static void intel_gt_fini_scratch(struct intel_gt *gt) i915_vma_unpin_and_release(&gt->scratch, 0); } +static struct i915_address_space *kernel_vm(struct intel_gt *gt) +{ + if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) + return &i915_ppgtt_create(gt->i915)->vm; + else + return i915_vm_get(&gt->ggtt->vm); +} + +static int __intel_context_flush_retire(struct intel_context *ce) +{ + struct intel_timeline *tl; + + tl = intel_context_timeline_lock(ce); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + intel_context_timeline_unlock(tl); + return 0; +} + +static int __engines_record_defaults(struct intel_gt *gt) +{ + struct i915_request *requests[I915_NUM_ENGINES] = {}; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * As we reset the gpu during very early sanitisation, the current + * register state on the GPU should reflect its default values.
+ * We load a context onto the hw (with restore-inhibit), then switch + * over to a second context to save that default register state. We + * can then prime every new context with that state so they all start + * from the same default HW values. + */ + + for_each_engine(engine, gt, id) { + struct intel_renderstate so; + struct intel_context *ce; + struct i915_request *rq; + + err = intel_renderstate_init(&so, engine); + if (err) + goto out; + + /* We must be able to switch to something! */ + GEM_BUG_ON(!engine->kernel_context); + engine->serial++; /* force the kernel context switch */ + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + intel_context_put(ce); + goto out; + } + + err = intel_engine_emit_ctx_wa(rq); + if (err) + goto err_rq; + + err = intel_renderstate_emit(&so, rq); + if (err) + goto err_rq; + +err_rq: + requests[id] = i915_request_get(rq); + i915_request_add(rq); + intel_renderstate_fini(&so); + if (err) + goto out; + } + + /* Flush the default context image to memory, and enable powersaving. */ + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { + err = -EIO; + goto out; + } + + for (id = 0; id < ARRAY_SIZE(requests); id++) { + struct i915_request *rq; + struct i915_vma *state; + void *vaddr; + + rq = requests[id]; + if (!rq) + continue; + + GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags)); + state = rq->context->state; + if (!state) + continue; + + /* Serialise with retirement on another CPU */ + GEM_BUG_ON(!i915_request_completed(rq)); + err = __intel_context_flush_retire(rq->context); + if (err) + goto out; + + /* We want to be able to unbind the state from the GGTT */ + GEM_BUG_ON(intel_context_is_pinned(rq->context)); + + /* + * As we will hold a reference to the logical state, it will + * not be torn down with the context, and importantly the + * object will hold onto its vma (making it possible for a + * stray GTT write to corrupt our defaults). Unmap the vma + * from the GTT to prevent such accidents and reclaim the + * space. + */ + err = i915_vma_unbind(state); + if (err) + goto out; + + i915_gem_object_lock(state->obj); + err = i915_gem_object_set_to_cpu_domain(state->obj, false); + i915_gem_object_unlock(state->obj); + if (err) + goto out; + + i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC); + + /* Check we can acquire the image of the context state */ + vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out; + } + + rq->engine->default_state = i915_gem_object_get(state->obj); + i915_gem_object_unpin_map(state->obj); + } + +out: + /* + * If we have to abandon now, we expect the engines to be idle + * and ready to be torn-down. The quickest way we can accomplish + * this is by declaring ourselves wedged. 
+ */ + if (err) + intel_gt_set_wedged(gt); + + for (id = 0; id < ARRAY_SIZE(requests); id++) { + struct intel_context *ce; + struct i915_request *rq; + + rq = requests[id]; + if (!rq) + continue; + + ce = rq->context; + i915_request_put(rq); + intel_context_put(ce); + } + return err; +} + +static int __engines_verify_workarounds(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return 0; + + for_each_engine(engine, gt, id) { + if (intel_engine_verify_workarounds(engine, "load")) + err = -EIO; + } + + return err; +} + +static void __intel_gt_disable(struct intel_gt *gt) +{ + intel_gt_set_wedged_on_init(gt); + + intel_gt_suspend_prepare(gt); + intel_gt_suspend_late(gt); + + GEM_BUG_ON(intel_gt_pm_is_awake(gt)); +} + int intel_gt_init(struct intel_gt *gt) { int err; - err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K); + err = i915_inject_probe_error(gt->i915, -ENODEV); if (err) return err; + /* + * This is just a security blanket to placate dragons. + * On some systems, we very sporadically observe that the first TLBs + * used by the CS may be stale, despite us poking the TLB reset. If + * we hold the forcewake during initialisation these problems + * just magically go away. + */ + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + + err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K); + if (err) + goto out_fw; + intel_gt_pm_init(gt); - return 0; + gt->vm = kernel_vm(gt); + if (!gt->vm) { + err = -ENOMEM; + goto err_pm; + } + + err = intel_engines_init(gt); + if (err) + goto err_engines; + + intel_uc_init(&gt->uc); + + err = intel_gt_resume(gt); + if (err) + goto err_uc_init; + + err = __engines_record_defaults(gt); + if (err) + goto err_gt; + + err = __engines_verify_workarounds(gt); + if (err) + goto err_gt; + + err = i915_inject_probe_error(gt->i915, -EIO); + if (err) + goto err_gt; + + goto out_fw; +err_gt: + __intel_gt_disable(gt); + intel_uc_fini_hw(&gt->uc); +err_uc_init: + intel_uc_fini(&gt->uc); +err_engines: + intel_engines_release(gt); + i915_vm_put(fetch_and_zero(&gt->vm)); +err_pm: + intel_gt_pm_fini(gt); + intel_gt_fini_scratch(gt); +out_fw: + if (err) + intel_gt_set_wedged_on_init(gt); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + return err; } void intel_gt_driver_remove(struct intel_gt *gt) { - GEM_BUG_ON(gt->awake); - intel_gt_pm_disable(gt); + __intel_gt_disable(gt); + + intel_uc_fini_hw(&gt->uc); + intel_uc_fini(&gt->uc); + + intel_engines_release(gt); } void intel_gt_driver_unregister(struct intel_gt *gt) { - intel_gpu_ips_teardown(); + intel_rps_driver_unregister(&gt->rps); } void intel_gt_driver_release(struct intel_gt *gt) { - /* Paranoia: make sure we have disabled everything before we exit. */ - intel_gt_pm_disable(gt); - intel_gt_pm_fini(gt); + struct i915_address_space *vm; + vm = fetch_and_zero(&gt->vm); + if (vm) /* FIXME being called twice on error paths :( */ + i915_vm_put(vm); + + intel_gt_pm_fini(gt); intel_gt_fini_scratch(gt); } void intel_gt_driver_late_release(struct intel_gt *gt) { intel_uc_driver_late_release(&gt->uc); + intel_gt_fini_requests(gt); intel_gt_fini_reset(gt); + intel_gt_fini_timelines(gt); + intel_engines_free(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index e6ab0bff0efb..2355cf129e9c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -12,6 +12,12 @@ struct drm_i915_private; +#define GT_TRACE(gt, fmt, ...) 
do { \ + const struct intel_gt *gt__ __maybe_unused = (gt); \ + GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \ + ##__VA_ARGS__); \ +} while (0) + static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) { return container_of(uc, struct intel_gt, uc); } @@ -28,7 +34,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) } void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); -void intel_gt_init_hw_early(struct drm_i915_private *i915); +void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt); int __must_check intel_gt_init_hw(struct intel_gt *gt); int intel_gt_init(struct intel_gt *gt); void intel_gt_driver_register(struct intel_gt *gt); @@ -46,8 +52,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt, void intel_gt_flush_ggtt_writes(struct intel_gt *gt); void intel_gt_chipset_flush(struct intel_gt *gt); -void intel_gt_init_hangcheck(struct intel_gt *gt); - static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, enum intel_gt_scratch_field field) { @@ -59,6 +63,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt) return __intel_reset_failed(&gt->reset); } -void intel_gt_queue_hangcheck(struct intel_gt *gt); - #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 34a4fb624bf7..f796bdf1ed30 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -11,6 +11,7 @@ #include "intel_gt.h" #include "intel_gt_irq.h" #include "intel_uncore.h" +#include "intel_rps.h" static void guc_irq_handler(struct intel_guc *guc, u16 iir) { @@ -27,7 +28,7 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir) tasklet = true; if (iir & GT_RENDER_USER_INTERRUPT) { - intel_engine_breadcrumbs_irq(engine); + intel_engine_signal_breadcrumbs(engine); tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); } @@ -77,7 +78,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, return guc_irq_handler(&gt->uc.guc, iir); if (instance == OTHER_GTPM_INSTANCE) - return gen11_rps_irq_handler(gt, iir); + return gen11_rps_irq_handler(&gt->rps, iir); WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); @@ -244,9 +245,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]); + intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]); if (gt_iir & ILK_BSD_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]); + intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]); } static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) @@ -270,11 +271,11 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]); + intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]); if (gt_iir & GT_BSD_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]); + intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]); if (gt_iir & GT_BLT_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]); + intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT 
| GT_BSD_CS_ERROR_INTERRUPT | @@ -336,7 +337,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]) } if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { - gen6_rps_irq_handler(gt->i915, gt_iir[2]); + gen6_rps_irq_handler(&gt->rps, gt_iir[2]); guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16); } } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index b866d5b1eee0..45b68a17da4d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -4,6 +4,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/suspend.h> + #include "i915_drv.h" #include "i915_globals.h" #include "i915_params.h" @@ -12,13 +14,28 @@ #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" +#include "intel_llc.h" #include "intel_pm.h" #include "intel_rc6.h" +#include "intel_rps.h" #include "intel_wakeref.h" -static void pm_notify(struct intel_gt *gt, int state) +static void user_forcewake(struct intel_gt *gt, bool suspend) { - blocking_notifier_call_chain(&gt->pm_notifications, state, gt->i915); + int count = atomic_read(&gt->user_wakeref); + + /* Inside suspend/resume so single threaded, no races to worry about. */ + if (likely(!count)) + return; + + intel_gt_pm_get(gt); + if (suspend) { + GEM_BUG_ON(count > atomic_read(&gt->wakeref.count)); + atomic_sub(count, &gt->wakeref.count); + } else { + atomic_add(count, &gt->wakeref.count); + } + intel_gt_pm_put(gt); } static int __gt_unpark(struct intel_wakeref *wf) @@ -26,7 +43,7 @@ static int __gt_unpark(struct intel_wakeref *wf) struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); struct drm_i915_private *i915 = gt->i915; - GEM_TRACE("\n"); + GT_TRACE(gt, "\n"); i915_globals_unpark(); @@ -44,19 +61,12 @@ static int __gt_unpark(struct intel_wakeref *wf) gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); GEM_BUG_ON(!gt->awake); - intel_enable_gt_powersave(i915); - - i915_update_gfx_val(i915); - if (INTEL_GEN(i915) >= 6) - gen6_rps_busy(i915); - + intel_rc6_unpark(&gt->rc6); + intel_rps_unpark(&gt->rps); i915_pmu_gt_unparked(i915); - intel_gt_queue_hangcheck(gt); intel_gt_unpark_requests(gt); - pm_notify(gt, INTEL_GT_UNPARK); - return 0; } @@ -66,20 +76,21 @@ static int __gt_park(struct intel_wakeref *wf) intel_wakeref_t wakeref = fetch_and_zero(&gt->awake); struct drm_i915_private *i915 = gt->i915; - GEM_TRACE("\n"); + GT_TRACE(gt, "\n"); - pm_notify(gt, INTEL_GT_PARK); intel_gt_park_requests(gt); + i915_vma_parked(gt); i915_pmu_gt_parked(i915); - if (INTEL_GEN(i915) >= 6) - gen6_rps_idle(i915); + intel_rps_park(&gt->rps); + intel_rc6_park(&gt->rc6); /* Everything switched off, flush any residual interrupt just in case */ intel_synchronize_irq(i915); + /* Defer dropping the display power well for 100ms, it's slow! */ GEM_BUG_ON(!wakeref); - intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); + intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref); i915_globals_park(); @@ -89,14 +100,11 @@ static int __gt_park(struct intel_wakeref *wf) static const struct intel_wakeref_ops wf_ops = { .get = __gt_unpark, .put = __gt_park, - .flags = INTEL_WAKEREF_PUT_ASYNC, }; void intel_gt_pm_init_early(struct intel_gt *gt) { intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops); - - BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications); } void intel_gt_pm_init(struct intel_gt *gt) @@ -107,6 +115,7 @@ void intel_gt_pm_init(struct intel_gt *gt) * user. 
*/ intel_rc6_init(&gt->rc6); + intel_rps_init(&gt->rps); } static bool reset_engines(struct intel_gt *gt) @@ -131,8 +140,22 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; + + GT_TRACE(gt, "force:%s", yesno(force)); - GEM_TRACE("\n"); + /* Use a raw wakeref to avoid calling intel_display_power_get early */ + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + + /* + * As we have just resumed the machine and woken the device up from + * deep PCI sleep (presumably D3_cold), assume the HW has been reset + * back to defaults, recovering from whatever wedged state we left it + * in and so worth trying to use the device once more. + */ + if (intel_gt_is_wedged(gt)) + intel_gt_unset_wedged(gt); intel_uc_sanitize(&gt->uc); @@ -140,6 +163,8 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) if (engine->reset.prepare) engine->reset.prepare(engine); + intel_uc_reset_prepare(&gt->uc); + if (reset_engines(gt) || force) { for_each_engine(engine, gt, id) __intel_engine_reset(engine, false); @@ -148,12 +173,9 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) for_each_engine(engine, gt, id) if (engine->reset.finish) engine->reset.finish(engine); -} -void intel_gt_pm_disable(struct intel_gt *gt) -{ - if (!is_mock_gt(gt)) - intel_sanitize_gt_powersave(gt->i915); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); } void intel_gt_pm_fini(struct intel_gt *gt) @@ -165,7 +187,9 @@ int intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - int err = 0; + int err; + + GT_TRACE(gt, "\n"); /* * After resume, we may need to poke into the pinned kernel @@ -174,9 +198,22 @@ int intel_gt_resume(struct intel_gt *gt) * allowing us to fixup the user contexts on their first pin. */ intel_gt_pm_get(gt); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); intel_rc6_sanitize(&gt->rc6); + /* Only when the HW is re-initialised, can we replay the requests */ + err = intel_gt_init_hw(gt); + if (err) { + dev_err(gt->i915->drm.dev, + "Failed to initialize GPU, declaring it wedged!\n"); + intel_gt_set_wedged(gt); + goto err_fw; + } + + intel_rps_enable(&gt->rps); + intel_llc_enable(&gt->llc); + for_each_engine(engine, gt, id) { struct intel_context *ce; @@ -185,9 +222,7 @@ int intel_gt_resume(struct intel_gt *gt) ce = engine->kernel_context; if (ce) { GEM_BUG_ON(!intel_context_is_pinned(ce)); - mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_); ce->ops->reset(ce); - mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_); } engine->serial++; /* kernel context lost */ @@ -203,43 +238,98 @@ int intel_gt_resume(struct intel_gt *gt) } intel_rc6_enable(&gt->rc6); + + intel_uc_resume(&gt->uc); + + user_forcewake(gt, false); + +err_fw: intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); intel_gt_pm_put(gt); return err; } -static void wait_for_idle(struct intel_gt *gt) +static void wait_for_suspend(struct intel_gt *gt) { + if (!intel_gt_pm_is_awake(gt)) + return; + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { /* * Forcibly cancel outstanding work and leave * the gpu quiet. */
intel_gt_set_wedged(gt); + intel_gt_retire_requests(gt); } intel_gt_pm_wait_for_idle(gt); } -void intel_gt_suspend(struct intel_gt *gt) +void intel_gt_suspend_prepare(struct intel_gt *gt) +{ + user_forcewake(gt, true); + wait_for_suspend(gt); + + intel_uc_suspend(&gt->uc); +} + +static suspend_state_t pm_suspend_target(void) +{ +#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP) + return pm_suspend_target_state; +#else + return PM_SUSPEND_TO_IDLE; +#endif +} + +void intel_gt_suspend_late(struct intel_gt *gt) { intel_wakeref_t wakeref; /* We expect to be idle already; but also want to be independent */ - wait_for_idle(gt); + wait_for_suspend(gt); + + if (is_mock_gt(gt)) + return; + + GEM_BUG_ON(gt->awake); + + /* + * On disabling the device, we want to turn off HW access to memory + * that we no longer own. + * + * However, not all suspend-states disable the device. S0 (s2idle) + * is effectively runtime-suspend, the device is left powered on + * but needs to be put into a low power state. We need to keep + * power management enabled, but we also retain system state and so + * it remains safe to keep on using our allocated memory. + */ + if (pm_suspend_target() == PM_SUSPEND_TO_IDLE) + return; - with_intel_runtime_pm(gt->uncore->rpm, wakeref) + with_intel_runtime_pm(gt->uncore->rpm, wakeref) { + intel_rps_disable(&gt->rps); intel_rc6_disable(&gt->rc6); + intel_llc_disable(&gt->llc); + } + + intel_gt_sanitize(gt, false); + + GT_TRACE(gt, "\n"); } void intel_gt_runtime_suspend(struct intel_gt *gt) { intel_uc_runtime_suspend(&gt->uc); + + GT_TRACE(gt, "\n"); } int intel_gt_runtime_resume(struct intel_gt *gt) { + GT_TRACE(gt, "\n"); intel_gt_init_swizzling(gt); return intel_uc_runtime_resume(&gt->uc); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 997770d3a968..4a9e48c12bd4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -12,11 +12,6 @@ #include "intel_gt_types.h" #include "intel_wakeref.h" -enum { - INTEL_GT_UNPARK, - INTEL_GT_PARK, -}; - static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt) { return intel_wakeref_is_active(&gt->wakeref); } @@ -27,6 +22,11 @@ static inline void intel_gt_pm_get(struct intel_gt *gt) intel_wakeref_get(&gt->wakeref); } +static inline void __intel_gt_pm_get(struct intel_gt *gt) +{ + __intel_wakeref_get(&gt->wakeref); +} + static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt) { return intel_wakeref_get_if_active(&gt->wakeref); } @@ -37,6 +37,11 @@ static inline void intel_gt_pm_put(struct intel_gt *gt) intel_wakeref_put(&gt->wakeref); } +static inline void intel_gt_pm_put_async(struct intel_gt *gt) +{ + intel_wakeref_put_async(&gt->wakeref); +} + static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) { return intel_wakeref_wait_for_idle(&gt->wakeref); } void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_pm_init(struct intel_gt *gt); -void intel_gt_pm_disable(struct intel_gt *gt); void intel_gt_pm_fini(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); +void intel_gt_suspend_prepare(struct intel_gt *gt); +void intel_gt_suspend_late(struct intel_gt *gt); int intel_gt_resume(struct intel_gt *gt); -void intel_gt_suspend(struct intel_gt *gt); void intel_gt_runtime_suspend(struct intel_gt *gt); int intel_gt_runtime_resume(struct intel_gt *gt);
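/*
 * Editorial aside (not part of the patch): intel_gt_suspend_late() above
 * brackets its work with with_intel_runtime_pm(), which (in i915) is a
 * one-trip for loop around a get/put pair. The shape of that scoped
 * acquire/release idiom, restated with illustrative names:
 */
#include <stdio.h>

typedef int example_wakeref_t;

static example_wakeref_t example_get(void)
{
	puts("acquire");
	return 1; /* a real cookie would identify the reference */
}

static void example_put(example_wakeref_t wf)
{
	(void)wf;
	puts("release");
}

/* the body runs once with the reference held, then the loop releases it */
#define with_example_ref(wf) \
	for ((wf) = example_get(); (wf); example_put(wf), (wf) = 0)

int main(void)
{
	example_wakeref_t wf;

	with_example_ref(wf)
		puts("body");
	return 0;
}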
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index b73229a84d85..b4f04614230e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -4,8 +4,11 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/workqueue.h> + #include "i915_drv.h" /* for_each_engine() */ #include "i915_request.h" +#include "intel_engine_heartbeat.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" @@ -20,13 +23,88 @@ static void retire_requests(struct intel_timeline *tl) break; } -static void flush_submission(struct intel_gt *gt) +static bool flush_submission(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; + bool active = false; + + for_each_engine(engine, gt, id) { + active |= intel_engine_flush_submission(engine); + active |= flush_work(&engine->retire_work); + } + + return active; +} + +static void engine_retire(struct work_struct *work) +{ + struct intel_engine_cs *engine = + container_of(work, typeof(*engine), retire_work); + struct intel_timeline *tl = xchg(&engine->retire, NULL); + + do { + struct intel_timeline *next = xchg(&tl->retire, NULL); + + /* + * Our goal here is to retire _idle_ timelines as soon as + * possible (as they are idle, we do not expect userspace + * to be cleaning up anytime soon). + * + * If the timeline is currently locked, either it is being + * retired elsewhere or about to be! + */ + if (mutex_trylock(&tl->mutex)) { + retire_requests(tl); + mutex_unlock(&tl->mutex); + } + intel_timeline_put(tl); + + GEM_BUG_ON(!next); + tl = ptr_mask_bits(next, 1); + } while (tl); +} + +static bool add_retire(struct intel_engine_cs *engine, + struct intel_timeline *tl) +{ +#define STUB ((struct intel_timeline *)1) + struct intel_timeline *first; + + /* + * We open-code a llist here to include the additional tag [BIT(0)] + * so that we know when the timeline is already on a + * retirement queue: either this engine or another. + */ + + if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */ + return false; + + intel_timeline_get(tl); + first = READ_ONCE(engine->retire); + do + tl->retire = ptr_pack_bits(first, 1, 1); + while (!try_cmpxchg(&engine->retire, &first, tl)); + + return !first; +} + +void intel_engine_add_retire(struct intel_engine_cs *engine, + struct intel_timeline *tl) +{ + if (add_retire(engine, tl)) + schedule_work(&engine->retire_work); +} - for_each_engine(engine, gt, id) - intel_engine_flush_submission(engine); +void intel_engine_init_retire(struct intel_engine_cs *engine) +{ + INIT_WORK(&engine->retire_work, engine_retire); +} + +void intel_engine_fini_retire(struct intel_engine_cs *engine) +{ + flush_work(&engine->retire_work); + GEM_BUG_ON(engine->retire); } long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) @@ -34,7 +112,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) struct intel_gt_timelines *timelines = &gt->timelines; struct intel_timeline *tl, *tn; unsigned long active_count = 0; - unsigned long flags; bool interruptible; LIST_HEAD(free); @@ -44,7 +121,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) flush_submission(gt); /* kick the ksoftirqd tasklets */ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) { active_count++; /* report busy to caller, try again? */
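/*
 * Editorial sketch (not part of the patch): add_retire() above is a
 * lock-free push onto a singly linked stack, with BIT(0) of the next
 * pointer doubling as an "already queued" tag; engine_retire() xchg()s
 * the whole list away and strips the tag with ptr_mask_bits(). A C11
 * restatement of the push, with illustrative names:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct example_node {
	_Atomic uintptr_t next; /* next pointer, with bit 0 as the tag */
};

static bool example_push(_Atomic(struct example_node *) *head,
			 struct example_node *node)
{
	uintptr_t expected = 0;
	struct example_node *first;

	/* claim the node: a non-zero next means it is already queued */
	if (!atomic_compare_exchange_strong(&node->next, &expected,
					    (uintptr_t)1))
		return false;

	first = atomic_load(head);
	do /* tag the old head into our next pointer, then swing head */
		atomic_store(&node->next, (uintptr_t)first | 1);
	while (!atomic_compare_exchange_weak(head, &first, node));

	return !first; /* true if the list was empty: kick the worker */
}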
@@ -34,7 +112,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) { struct intel_gt_timelines *timelines = &gt->timelines; struct intel_timeline *tl, *tn; unsigned long active_count = 0; - unsigned long flags; bool interruptible; LIST_HEAD(free); @@ -44,7 +121,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) flush_submission(gt); /* kick the ksoftirqd tasklets */ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) { active_count++; /* report busy to caller, try again? */ @@ -52,9 +129,9 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) } intel_timeline_get(tl); - GEM_BUG_ON(!tl->active_count); - tl->active_count++; /* pin the list element */ - spin_unlock_irqrestore(&timelines->lock, flags); + GEM_BUG_ON(!atomic_read(&tl->active_count)); + atomic_inc(&tl->active_count); /* pin the list element */ + spin_unlock(&timelines->lock); if (timeout > 0) { struct dma_fence *fence; @@ -70,28 +147,31 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) retire_requests(tl); - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); /* Resume iteration after dropping lock */ list_safe_reset_next(tl, tn, link); - if (--tl->active_count) - active_count += !!rcu_access_pointer(tl->last_request.fence); - else + if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); + else + active_count += i915_active_fence_isset(&tl->last_request); mutex_unlock(&tl->mutex); /* Defer the final release to after the spinlock */ if (refcount_dec_and_test(&tl->kref.refcount)) { - GEM_BUG_ON(tl->active_count); + GEM_BUG_ON(atomic_read(&tl->active_count)); list_add(&tl->link, &free); } } - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); + if (flush_submission(gt)) + active_count++; + return active_count ? timeout : 0; } @@ -115,9 +195,9 @@ static void retire_work_handler(struct work_struct *work) struct intel_gt *gt = container_of(work, typeof(*gt), requests.retire_work.work); - intel_gt_retire_requests(gt); schedule_delayed_work(&gt->requests.retire_work, round_jiffies_up_relative(HZ)); + intel_gt_retire_requests(gt); } void intel_gt_init_requests(struct intel_gt *gt) @@ -135,3 +215,9 @@ void intel_gt_unpark_requests(struct intel_gt *gt) schedule_delayed_work(&gt->requests.retire_work, round_jiffies_up_relative(HZ)); } + +void intel_gt_fini_requests(struct intel_gt *gt) +{ + /* Wait until the work is marked as finished before unloading! */ + cancel_delayed_work_sync(&gt->requests.retire_work); +}
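[Illustration, not part of the commit: intel_gt_retire_requests_timeout() above pins each timeline with atomic_inc() before dropping timelines->lock, does the slow retire work unlocked, then relocks and lets atomic_dec_and_test() pick who unlinks the element; list_safe_reset_next() re-reads the successor once the lock is held again. A compilable userspace model of that pin/unlock/relock dance, with invented names and pins treated as purely transient:]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct timeline {
	atomic_int active_count;	/* pins the element while unlocked */
	struct timeline *next;
	int id;
};

static pthread_mutex_t timelines_lock = PTHREAD_MUTEX_INITIALIZER;

static void retire_all(struct timeline *head)
{
	struct timeline *tl = head;

	pthread_mutex_lock(&timelines_lock);
	while (tl) {
		struct timeline *next;

		/* Pin: the element cannot be unlinked or freed while we
		 * drop the lock for the slow retire work. */
		atomic_fetch_add(&tl->active_count, 1);
		pthread_mutex_unlock(&timelines_lock);

		printf("retiring requests on timeline %d\n", tl->id);

		pthread_mutex_lock(&timelines_lock);
		/* Re-read the successor under the lock, mirroring
		 * list_safe_reset_next() in the hunk above. */
		next = tl->next;

		/* atomic_dec_and_test() analogue: the last unpinner
		 * is the one who unlinks the idle element. */
		if (atomic_fetch_sub(&tl->active_count, 1) == 1)
			printf("timeline %d idle: unlink it\n", tl->id);

		tl = next;
	}
	pthread_mutex_unlock(&timelines_lock);
}

int main(void)
{
	struct timeline b = { .id = 2, .next = NULL };
	struct timeline a = { .id = 1, .next = &b };

	retire_all(&a);
	return 0;
}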
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h index bd31cbce47e0..dbac53baf1cb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h @@ -7,7 +7,9 @@ #ifndef INTEL_GT_REQUESTS_H #define INTEL_GT_REQUESTS_H +struct intel_engine_cs; struct intel_gt; +struct intel_timeline; long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout); static inline void intel_gt_retire_requests(struct intel_gt *gt) @@ -15,10 +17,16 @@ static inline void intel_gt_retire_requests(struct intel_gt *gt) intel_gt_retire_requests_timeout(gt, 0); } +void intel_engine_init_retire(struct intel_engine_cs *engine); +void intel_engine_add_retire(struct intel_engine_cs *engine, + struct intel_timeline *tl); +void intel_engine_fini_retire(struct intel_engine_cs *engine); + int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); void intel_gt_init_requests(struct intel_gt *gt); void intel_gt_park_requests(struct intel_gt *gt); void intel_gt_unpark_requests(struct intel_gt *gt); +void intel_gt_fini_requests(struct intel_gt *gt); #endif /* INTEL_GT_REQUESTS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index ae4aaf75ac78..96890dd12b5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -20,6 +20,7 @@ #include "intel_llc_types.h" #include "intel_reset_types.h" #include "intel_rc6_types.h" +#include "intel_rps_types.h" #include "intel_wakeref.h" struct drm_i915_private; @@ -27,14 +28,6 @@ struct i915_ggtt; struct intel_engine_cs; struct intel_uncore; -struct intel_hangcheck { - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - - struct delayed_work work; -}; - struct intel_gt { struct drm_i915_private *i915; struct intel_uncore *uncore; @@ -68,7 +61,6 @@ struct intel_gt { struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ - struct intel_hangcheck hangcheck; struct intel_reset reset; /** @@ -82,8 +74,7 @@ struct intel_gt { struct intel_llc llc; struct intel_rc6 rc6; - - struct blocking_notifier_head pm_notifications; + struct intel_rps rps; ktime_t last_init_time; @@ -99,6 +90,13 @@ struct intel_gt { struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; + + /* + * Default address space (either GGTT or ppGTT depending on arch). + * + * Reserved for exclusive use by the kernel.
+ */ + struct i915_address_space *vm; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c deleted file mode 100644 index 0fdef00af9e4..000000000000 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "i915_drv.h" -#include "intel_engine.h" -#include "intel_gt.h" -#include "intel_reset.h" - -struct hangcheck { - u64 acthd; - u32 ring; - u32 head; - enum intel_engine_hangcheck_action action; - unsigned long action_timestamp; - int deadlock; - struct intel_instdone instdone; - bool wedged:1; - bool stalled:1; -}; - -static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) -{ - u32 tmp = current_instdone | *old_instdone; - bool unchanged; - - unchanged = tmp == *old_instdone; - *old_instdone |= tmp; - - return unchanged; -} - -static bool subunits_stuck(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - struct intel_instdone instdone; - struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; - bool stuck; - int slice; - int subslice; - - intel_engine_get_instdone(engine, &instdone); - - /* There might be unstable subunit states even when - * actual head is not moving. Filter out the unstable ones by - * accumulating the undone -> done transitions and only - * consider those as progress. 
- */ - stuck = instdone_unchanged(instdone.instdone, - &accu_instdone->instdone); - stuck &= instdone_unchanged(instdone.slice_common, - &accu_instdone->slice_common); - - for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) { - stuck &= instdone_unchanged(instdone.sampler[slice][subslice], - &accu_instdone->sampler[slice][subslice]); - stuck &= instdone_unchanged(instdone.row[slice][subslice], - &accu_instdone->row[slice][subslice]); - } - - return stuck; -} - -static enum intel_engine_hangcheck_action -head_stuck(struct intel_engine_cs *engine, u64 acthd) -{ - if (acthd != engine->hangcheck.acthd) { - - /* Clear subunit states on head movement */ - memset(&engine->hangcheck.instdone, 0, - sizeof(engine->hangcheck.instdone)); - - return ENGINE_ACTIVE_HEAD; - } - - if (!subunits_stuck(engine)) - return ENGINE_ACTIVE_SUBUNITS; - - return ENGINE_DEAD; -} - -static enum intel_engine_hangcheck_action -engine_stuck(struct intel_engine_cs *engine, u64 acthd) -{ - enum intel_engine_hangcheck_action ha; - u32 tmp; - - ha = head_stuck(engine, acthd); - if (ha != ENGINE_DEAD) - return ha; - - if (IS_GEN(engine->i915, 2)) - return ENGINE_DEAD; - - /* Is the chip hanging on a WAIT_FOR_EVENT? - * If so we can simply poke the RB_WAIT bit - * and break the hang. This should work on - * all but the second generation chipsets. - */ - tmp = ENGINE_READ(engine, RING_CTL); - if (tmp & RING_WAIT) { - intel_gt_handle_error(engine->gt, engine->mask, 0, - "stuck wait on %s", engine->name); - ENGINE_WRITE(engine, RING_CTL, tmp); - return ENGINE_WAIT_KICK; - } - - return ENGINE_DEAD; -} - -static void hangcheck_load_sample(struct intel_engine_cs *engine, - struct hangcheck *hc) -{ - hc->acthd = intel_engine_get_active_head(engine); - hc->ring = ENGINE_READ(engine, RING_START); - hc->head = ENGINE_READ(engine, RING_HEAD); -} - -static void hangcheck_store_sample(struct intel_engine_cs *engine, - const struct hangcheck *hc) -{ - engine->hangcheck.acthd = hc->acthd; - engine->hangcheck.last_ring = hc->ring; - engine->hangcheck.last_head = hc->head; -} - -static enum intel_engine_hangcheck_action -hangcheck_get_action(struct intel_engine_cs *engine, - const struct hangcheck *hc) -{ - if (intel_engine_is_idle(engine)) - return ENGINE_IDLE; - - if (engine->hangcheck.last_ring != hc->ring) - return ENGINE_ACTIVE_SEQNO; - - if (engine->hangcheck.last_head != hc->head) - return ENGINE_ACTIVE_SEQNO; - - return engine_stuck(engine, hc->acthd); -} - -static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, - struct hangcheck *hc) -{ - unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT; - - hc->action = hangcheck_get_action(engine, hc); - - /* We always increment the progress - * if the engine is busy and still processing - * the same request, so that no single request - * can run indefinitely (such as a chain of - * batches). The only time we do not increment - * the hangcheck score on this ring, if this - * engine is in a legitimate wait for another - * engine. In that case the waiting engine is a - * victim and we want to be sure we catch the - * right culprit. Then every time we do kick - * the ring, make it as a progress as the seqno - * advancement might ensure and if not, it - * will catch the hanging engine. 
- */ - - switch (hc->action) { - case ENGINE_IDLE: - case ENGINE_ACTIVE_SEQNO: - /* Clear head and subunit states on seqno movement */ - hc->acthd = 0; - - memset(&engine->hangcheck.instdone, 0, - sizeof(engine->hangcheck.instdone)); - - /* Intentional fall through */ - case ENGINE_WAIT_KICK: - case ENGINE_WAIT: - engine->hangcheck.action_timestamp = jiffies; - break; - - case ENGINE_ACTIVE_HEAD: - case ENGINE_ACTIVE_SUBUNITS: - /* - * Seqno stuck with still active engine gets leeway, - * in hopes that it is just a long shader. - */ - timeout = I915_SEQNO_DEAD_TIMEOUT; - break; - - case ENGINE_DEAD: - break; - - default: - MISSING_CASE(hc->action); - } - - hc->stalled = time_after(jiffies, - engine->hangcheck.action_timestamp + timeout); - hc->wedged = time_after(jiffies, - engine->hangcheck.action_timestamp + - I915_ENGINE_WEDGED_TIMEOUT); -} - -static void hangcheck_declare_hang(struct intel_gt *gt, - intel_engine_mask_t hung, - intel_engine_mask_t stuck) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - char msg[80]; - int len; - - /* If some rings hung but others were still busy, only - * blame the hanging rings in the synopsis. - */ - if (stuck != hung) - hung &= ~stuck; - len = scnprintf(msg, sizeof(msg), - "%s on ", stuck == hung ? "no progress" : "hang"); - for_each_engine_masked(engine, gt, hung, tmp) - len += scnprintf(msg + len, sizeof(msg) - len, - "%s, ", engine->name); - msg[len-2] = '\0'; - - return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg); -} - -/* - * This is called when the chip hasn't reported back with completed - * batchbuffers in a long time. We keep track per ring seqno progress and - * if there are no progress, hangcheck score for that ring is increased. - * Further, acthd is inspected to see if the ring is stuck. On stuck case - * we kick the ring. If we see no progress on three subsequent calls - * we assume chip is wedged and try to fix it by resetting the chip. - */ -static void hangcheck_elapsed(struct work_struct *work) -{ - struct intel_gt *gt = - container_of(work, typeof(*gt), hangcheck.work.work); - intel_engine_mask_t hung = 0, stuck = 0, wedged = 0; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - - if (!i915_modparams.enable_hangcheck) - return; - - if (!READ_ONCE(gt->awake)) - return; - - if (intel_gt_is_wedged(gt)) - return; - - wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm); - if (!wakeref) - return; - - /* As enabling the GPU requires fairly extensive mmio access, - * periodically arm the mmio checker to see if we are triggering - * any invalid access. 
- */ - intel_uncore_arm_unclaimed_mmio_detection(gt->uncore); - - for_each_engine(engine, gt, id) { - struct hangcheck hc; - - intel_engine_breadcrumbs_irq(engine); - - hangcheck_load_sample(engine, &hc); - hangcheck_accumulate_sample(engine, &hc); - hangcheck_store_sample(engine, &hc); - - if (hc.stalled) { - hung |= engine->mask; - if (hc.action != ENGINE_DEAD) - stuck |= engine->mask; - } - - if (hc.wedged) - wedged |= engine->mask; - } - - if (GEM_SHOW_DEBUG() && (hung | stuck)) { - struct drm_printer p = drm_debug_printer("hangcheck"); - - for_each_engine(engine, gt, id) { - if (intel_engine_is_idle(engine)) - continue; - - intel_engine_dump(engine, &p, "%s\n", engine->name); - } - } - - if (wedged) { - dev_err(gt->i915->drm.dev, - "GPU recovery timed out," - " cancelling all in-flight rendering.\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - } - - if (hung) - hangcheck_declare_hang(gt, hung, stuck); - - intel_runtime_pm_put(gt->uncore->rpm, wakeref); - - /* Reset timer in case GPU hangs without another request being added */ - intel_gt_queue_hangcheck(gt); -} - -void intel_gt_queue_hangcheck(struct intel_gt *gt) -{ - unsigned long delay; - - if (unlikely(!i915_modparams.enable_hangcheck)) - return; - - /* - * Don't continually defer the hangcheck so that it is always run at - * least once after work has been scheduled on any ring. Otherwise, - * we will ignore a hung ring if a second ring is kept busy. - */ - - delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); - queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay); -} - -void intel_engine_init_hangcheck(struct intel_engine_cs *engine) -{ - memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); - engine->hangcheck.action_timestamp = jiffies; -} - -void intel_gt_init_hangcheck(struct intel_gt *gt) -{ - INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed); -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftest_hangcheck.c" -#endif diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index 35093eb5f24e..ceb785b75c25 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -48,7 +48,7 @@ static bool get_ia_constants(struct intel_llc *llc, struct ia_constants *consts) { struct drm_i915_private *i915 = llc_to_gt(llc)->i915; - struct intel_rps *rps = &i915->gt_pm.rps; + struct intel_rps *rps = &llc_to_gt(llc)->rps; if (rps->max_freq <= rps->min_freq) return false; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index d0088d020220..4fb70a7716e3 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -133,18 +133,19 @@ */ #include <linux/interrupt.h> -#include "gem/i915_gem_context.h" - #include "i915_drv.h" #include "i915_perf.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_context.h" #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_requests.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" #include "intel_reset.h" +#include "intel_ring.h" #include "intel_workarounds.h" #define RING_EXECLIST_QFULL (1 << 0x2) @@ -234,16 +235,9 @@ static void execlists_init_reg_state(u32 *reg_state, const struct intel_engine_cs *engine, const struct intel_ring *ring, bool close); - -static void __context_pin_acquire(struct intel_context *ce) -{ - mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _RET_IP_); -} - -static void __context_pin_release(struct intel_context *ce) -{ -
mutex_release(&ce->pin_mutex.dep_map, 0, _RET_IP_); -} +static void +__execlists_update_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine); static void mark_eio(struct i915_request *rq) { @@ -256,6 +250,23 @@ static void mark_eio(struct i915_request *rq) i915_request_mark_complete(rq); } +static struct i915_request * +active_request(const struct intel_timeline * const tl, struct i915_request *rq) +{ + struct i915_request *active = rq; + + rcu_read_lock(); + list_for_each_entry_continue_reverse(rq, &tl->requests, link) { + if (i915_request_completed(rq)) + break; + + active = rq; + } + rcu_read_unlock(); + + return active; +} + static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) { return (i915_ggtt_offset(engine->status_page.vma) + @@ -460,8 +471,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) if (IS_GEN(engine->i915, 8)) desc |= GEN8_CTX_L3LLC_COHERENT; - desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; - /* bits 12-31 */ + desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */ /* * The following 32bits are copied into the OA reports (dword 2). * Consider updating oa_get_render_ctx_id in i915_perf.c when changing @@ -834,12 +844,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) } } -static void unwind_wa_tail(struct i915_request *rq) -{ - rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); - assert_ring_tail_valid(rq->ring, rq->tail); -} - static struct i915_request * __unwind_incomplete_requests(struct intel_engine_cs *engine) { @@ -852,12 +856,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_for_each_entry_safe_reverse(rq, rn, &engine->active.requests, sched.link) { - if (i915_request_completed(rq)) continue; /* XXX */ __i915_request_unsubmit(rq); - unwind_wa_tail(rq); /* * Push the request back into the queue for later resubmission. @@ -877,7 +879,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_move(&rq->sched.link, pl); active = rq; } else { - struct intel_engine_cs *owner = rq->hw_context->engine; + struct intel_engine_cs *owner = rq->context->engine; /* * Decouple the virtual breadcrumb before moving it @@ -925,14 +927,180 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status) status, rq); } +static void intel_engine_context_in(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (READ_ONCE(engine->stats.enabled) == 0) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (engine->stats.enabled > 0) { + if (engine->stats.active++ == 0) + engine->stats.start = ktime_get(); + GEM_BUG_ON(engine->stats.active == 0); + } + + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void intel_engine_context_out(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (READ_ONCE(engine->stats.enabled) == 0) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (engine->stats.enabled > 0) { + ktime_t last; + + if (engine->stats.active && --engine->stats.active == 0) { + /* + * Decrement the active context count and in case GPU + * is now idle add up to the running total. + */ + last = ktime_sub(ktime_get(), engine->stats.start); + + engine->stats.total = ktime_add(engine->stats.total, + last); + } else if (engine->stats.active == 0) { + /* + * After turning on engine stats, context out might be + * the first event in which case we account from the + * time stats gathering was turned on. 
+ */ + last = ktime_sub(ktime_get(), engine->stats.enabled_at); + + engine->stats.total = ktime_add(engine->stats.total, + last); + } + } + + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) +{ + if (INTEL_GEN(engine->i915) >= 12) + return 0x60; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x54; + else if (engine->class == RENDER_CLASS) + return 0x58; + else + return -1; +} + +static void +execlists_check_context(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + const struct intel_ring *ring = ce->ring; + u32 *regs = ce->lrc_reg_state; + bool valid = true; + int x; + + if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) { + pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n", + engine->name, + regs[CTX_RING_START], + i915_ggtt_offset(ring->vma)); + regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); + valid = false; + } + + if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) != + (RING_CTL_SIZE(ring->size) | RING_VALID)) { + pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n", + engine->name, + regs[CTX_RING_CTL], + (u32)(RING_CTL_SIZE(ring->size) | RING_VALID)); + regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; + valid = false; + } + + x = lrc_ring_mi_mode(engine); + if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) { + pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n", + engine->name, regs[x + 1]); + regs[x + 1] &= ~STOP_RING; + regs[x + 1] |= STOP_RING << 16; + valid = false; + } + + WARN_ONCE(!valid, "Invalid lrc state found before submission\n"); +} + +static void restore_default_state(struct intel_context *ce, + struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + + if (engine->pinned_default_state) + memcpy(regs, /* skip restoring the vanilla PPHWSP */ + engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, + engine->context_size - PAGE_SIZE); + + execlists_init_reg_state(regs, ce, engine, ce->ring, false); +} + +static void reset_active(struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct intel_context * const ce = rq->context; + u32 head; + + /* + * The executing context has been cancelled. We want to prevent + * further execution along this context and propagate the error on + * to anything depending on its results. + * + * In __i915_request_submit(), we apply the -EIO and remove the + * requests' payloads for any banned requests. But first, we must + * rewind the context back to the start of the incomplete request so + * that we do not jump back into the middle of the batch. + * + * We preserve the breadcrumbs and semaphores of the incomplete + * requests so that inter-timeline dependencies (i.e other timelines) + * remain correctly ordered. And we defer to __i915_request_submit() + * so that all asynchronous waits are correctly handled. 
+ */ + ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n", + rq->fence.context, rq->fence.seqno); + + /* On resubmission of the active request, payload will be scrubbed */ + if (i915_request_completed(rq)) + head = rq->tail; + else + head = active_request(ce->timeline, rq)->head; + ce->ring->head = intel_ring_wrap(ce->ring, head); + intel_ring_update_space(ce->ring); + + /* Scrub the context image to prevent replaying the previous batch */ + restore_default_state(ce, engine); + __execlists_update_reg_state(ce, engine); + + /* We've switched away, so this should be a no-op, but intent matters */ + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; +} + static inline struct intel_engine_cs * __execlists_schedule_in(struct i915_request *rq) { struct intel_engine_cs * const engine = rq->engine; - struct intel_context * const ce = rq->hw_context; + struct intel_context * const ce = rq->context; intel_context_get(ce); + if (unlikely(intel_context_is_banned(ce))) + reset_active(rq, engine); + + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + execlists_check_context(ce, engine); + if (ce->tag) { /* Use a fixed tag for OA and friends */ ce->lrc_desc |= (u64)ce->tag << 32; @@ -945,7 +1113,7 @@ __execlists_schedule_in(struct i915_request *rq) BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); } - intel_gt_pm_get(engine->gt); + __intel_gt_pm_get(engine->gt); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); intel_engine_context_in(engine); @@ -955,7 +1123,7 @@ __execlists_schedule_in(struct i915_request *rq) static inline struct i915_request * execlists_schedule_in(struct i915_request *rq, int idx) { - struct intel_context * const ce = rq->hw_context; + struct intel_context * const ce = rq->context; struct intel_engine_cs *old; GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); @@ -986,11 +1154,25 @@ static inline void __execlists_schedule_out(struct i915_request *rq, struct intel_engine_cs * const engine) { - struct intel_context * const ce = rq->hw_context; + struct intel_context * const ce = rq->context; + + /* + * NB process_csb() is not under the engine->active.lock and hence + * schedule_out can race with schedule_in meaning that we should + * refrain from doing non-trivial work here. + */ + + /* + * If we have just completed this context, the engine may now be + * idle and we want to re-enter powersaving. 
+ */ + if (list_is_last(&rq->link, &ce->timeline->requests) && + i915_request_completed(rq)) + intel_engine_add_retire(engine, ce->timeline); intel_engine_context_out(engine); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); - intel_gt_pm_put(engine->gt); + intel_gt_pm_put_async(engine->gt); /* * If this is part of a virtual engine, its next request may @@ -1010,7 +1192,7 @@ __execlists_schedule_out(struct i915_request *rq, static inline void execlists_schedule_out(struct i915_request *rq) { - struct intel_context * const ce = rq->hw_context; + struct intel_context * const ce = rq->context; struct intel_engine_cs *cur, *old; trace_i915_request_out(rq); @@ -1025,13 +1207,29 @@ execlists_schedule_out(struct i915_request *rq) i915_request_put(rq); } -static u64 execlists_update_context(const struct i915_request *rq) +static u64 execlists_update_context(struct i915_request *rq) { - struct intel_context *ce = rq->hw_context; - u64 desc; + struct intel_context *ce = rq->context; + u64 desc = ce->lrc_desc; + u32 tail; - ce->lrc_reg_state[CTX_RING_TAIL] = - intel_ring_set_tail(rq->ring, rq->tail); + /* + * WaIdleLiteRestore:bdw,skl + * + * We should never submit the context with the same RING_TAIL twice + * just in case we submit an empty ring, which confuses the HW. + * + * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of + * the normal request to be able to always advance the RING_TAIL on + * subsequent resubmissions (for lite restore). Should that fail us, + * and we try and submit the same tail again, force the context + * reload. + */ + tail = intel_ring_set_tail(rq->ring, rq->tail); + if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail)) + desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_reg_state[CTX_RING_TAIL] = tail; + rq->tail = rq->wa_tail; /* * Make sure the context image is complete before we submit it to HW. @@ -1042,21 +1240,14 @@ static u64 execlists_update_context(const struct i915_request *rq) * may not be visible to the HW prior to the completion of the UC * register write and that we may begin execution from the context * before its image is complete leading to invalid PD chasing. - * - * Furthermore, Braswell, at least, wants a full mb to be sure that - * the writes are coherent in memory (visible to the GPU) prior to - * execution, and not just visible to other CPUs (as is the result of - * wmb). */ - mb(); - - desc = ce->lrc_desc; - ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + wmb(); /* Wa_1607138340:tgl */ if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0)) desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; return desc; } @@ -1082,15 +1273,14 @@ trace_ports(const struct intel_engine_execlists *execlists, if (!ports[0]) return; - GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", - engine->name, msg, - ports[0]->fence.context, - ports[0]->fence.seqno, - i915_request_completed(ports[0]) ? "!" : - i915_request_started(ports[0]) ? "*" : - "", - ports[1] ? ports[1]->fence.context : 0, - ports[1] ? ports[1]->fence.seqno : 0); + ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg, + ports[0]->fence.context, + ports[0]->fence.seqno, + i915_request_completed(ports[0]) ? "!" : + i915_request_started(ports[0]) ? "*" : + "", + ports[1] ? ports[1]->fence.context : 0, + ports[1] ? 
ports[1]->fence.seqno : 0); } static __maybe_unused bool @@ -1114,33 +1304,56 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, } for (port = execlists->pending; (rq = *port); port++) { - if (ce == rq->hw_context) { - GEM_TRACE_ERR("Duplicate context in pending[%zd]\n", + unsigned long flags; + bool ok = true; + + GEM_BUG_ON(!kref_read(&rq->fence.refcount)); + GEM_BUG_ON(!i915_request_is_active(rq)); + + if (ce == rq->context) { + GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n", + ce->timeline->fence_context, port - execlists->pending); return false; } + ce = rq->context; - ce = rq->hw_context; - if (i915_request_completed(rq)) + /* Hold tightly onto the lock to prevent concurrent retires! */ + if (!spin_trylock_irqsave(&rq->lock, flags)) continue; - if (i915_active_is_idle(&ce->active)) { - GEM_TRACE_ERR("Inactive context in pending[%zd]\n", + if (i915_request_completed(rq)) + goto unlock; + + if (i915_active_is_idle(&ce->active) && + !intel_context_is_barrier(ce)) { + GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n", + ce->timeline->fence_context, port - execlists->pending); - return false; + ok = false; + goto unlock; } if (!i915_vma_is_pinned(ce->state)) { - GEM_TRACE_ERR("Unpinned context in pending[%zd]\n", + GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n", + ce->timeline->fence_context, port - execlists->pending); - return false; + ok = false; + goto unlock; } if (!i915_vma_is_pinned(ce->ring->vma)) { - GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n", + GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n", + ce->timeline->fence_context, port - execlists->pending); - return false; + ok = false; + goto unlock; } + +unlock: + spin_unlock_irqrestore(&rq->lock, flags); + if (!ok) + return false; } return ce; @@ -1185,7 +1398,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) static bool ctx_single_port_submission(const struct intel_context *ce) { return (IS_ENABLED(CONFIG_DRM_I915_GVT) && - i915_gem_context_force_single_submission(ce->gem_context)); + intel_context_force_single_submission(ce)); } static bool can_merge_ctx(const struct intel_context *prev, @@ -1221,7 +1434,7 @@ static bool can_merge_rq(const struct i915_request *prev, (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL))) return false; - if (!can_merge_ctx(prev->hw_context, next->hw_context)) + if (!can_merge_ctx(prev->context, next->context)) return false; return true; @@ -1269,7 +1482,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, if (!list_empty(&ve->context.signal_link)) { list_move_tail(&ve->context.signal_link, &engine->breadcrumbs.signalers); - intel_engine_queue_breadcrumbs(engine); + intel_engine_signal_breadcrumbs(engine); } spin_unlock(&old->breadcrumbs.irq_lock); } @@ -1345,7 +1558,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) { int hint; - if (!intel_engine_has_semaphores(engine)) + if (!intel_engine_has_timeslices(engine)) return false; if (list_is_last(&rq->sched.link, &engine->active.requests)) @@ -1366,15 +1579,32 @@ switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) return rq_prio(list_next_entry(rq, sched.link)); } -static bool -enable_timeslice(const struct intel_engine_execlists *execlists) +static inline unsigned long +timeslice(const struct intel_engine_cs *engine) { - const struct i915_request *rq = *execlists->active; + return READ_ONCE(engine->props.timeslice_duration_ms); +} + +static unsigned long +active_timeslice(const struct intel_engine_cs 
*engine) +{ + const struct i915_request *rq = *engine->execlists.active; if (i915_request_completed(rq)) - return false; + return 0; + + if (engine->execlists.switch_priority_hint < effective_prio(rq)) + return 0; + + return timeslice(engine); +} - return execlists->switch_priority_hint >= effective_prio(rq); +static void set_timeslice(struct intel_engine_cs *engine) +{ + if (!intel_engine_has_timeslices(engine)) + return; + + set_timer_ms(&engine->execlists.timer, active_timeslice(engine)); } static void record_preemption(struct intel_engine_execlists *execlists) @@ -1382,6 +1612,30 @@ static void record_preemption(struct intel_engine_execlists *execlists) (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); } +static unsigned long active_preempt_timeout(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = last_active(&engine->execlists); + if (!rq) + return 0; + + /* Force a fast reset for terminated contexts (ignoring sysfs!) */ + if (unlikely(intel_context_is_banned(rq->context))) + return 1; + + return READ_ONCE(engine->props.preempt_timeout_ms); +} + +static void set_preempt_timeout(struct intel_engine_cs *engine) +{ + if (!intel_engine_has_preempt_reset(engine)) + return; + + set_timer_ms(&engine->execlists.preempt, + active_preempt_timeout(engine)); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -1444,12 +1698,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine) last = last_active(execlists); if (last) { if (need_preempt(engine, last, rb)) { - GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n", - engine->name, - last->fence.context, - last->fence.seqno, - last->sched.attr.priority, - execlists->queue_priority_hint); + ENGINE_TRACE(engine, + "preempting last=%llx:%lld, prio=%d, hint=%d\n", + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); record_preemption(execlists); /* @@ -1475,16 +1729,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * tendency to ignore us rewinding the TAIL to the * end of an earlier request. */ - last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; + last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE; last = NULL; } else if (need_timeslice(engine, last) && timer_expired(&engine->execlists.timer)) { - GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n", - engine->name, - last->fence.context, - last->fence.seqno, - last->sched.attr.priority, - execlists->queue_priority_hint); + ENGINE_TRACE(engine, + "expired last=%llx:%lld, prio=%d, hint=%d\n", + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); ring_set_paused(engine, 1); defer_active(engine); @@ -1521,20 +1775,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ if (!execlists->timer.expires && need_timeslice(engine, last)) - mod_timer(&execlists->timer, - jiffies + 1); + set_timer_ms(&execlists->timer, + timeslice(engine)); + return; } - - /* - * WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent - * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_fini_breadcrumb() for - * where we prepare the padding after the - * end of the request. 
- */ - last->tail = last->wa_tail; } } @@ -1556,7 +1801,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(rq != ve->request); GEM_BUG_ON(rq->engine != &ve->base); - GEM_BUG_ON(rq->hw_context != &ve->context); + GEM_BUG_ON(rq->context != &ve->context); if (rq_prio(rq) >= queue_prio(execlists)) { if (!virtual_matches(ve, rq, engine)) { @@ -1570,14 +1815,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) return; /* leave this for another */ } - GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", - engine->name, - rq->fence.context, - rq->fence.seqno, - i915_request_completed(rq) ? "!" : - i915_request_started(rq) ? "*" : - "", - yesno(engine != ve->siblings[0])); + ENGINE_TRACE(engine, + "virtual rq=%llx:%lld%s, new engine? %s\n", + rq->fence.context, + rq->fence.seqno, + i915_request_completed(rq) ? "!" : + i915_request_started(rq) ? "*" : + "", + yesno(engine != ve->siblings[0])); ve->request = NULL; ve->base.execlists.queue_priority_hint = INT_MIN; @@ -1675,7 +1920,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * same LRCA, i.e. we must submit 2 different * contexts if we submit 2 ELSP. */ - if (last->hw_context == rq->hw_context) + if (last->context == rq->context) goto done; if (i915_request_has_sentinel(last)) @@ -1688,8 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * the same context (even though a different * request) to the second port. */ - if (ctx_single_port_submission(last->hw_context) || - ctx_single_port_submission(rq->hw_context)) + if (ctx_single_port_submission(last->context) || + ctx_single_port_submission(rq->context)) goto done; merge = false; @@ -1703,8 +1948,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } GEM_BUG_ON(last && - !can_merge_ctx(last->hw_context, - rq->hw_context)); + !can_merge_ctx(last->context, + rq->context)); submit = true; last = rq; @@ -1733,9 +1978,6 @@ done: * interrupt for secondary ports). 
*/ execlists->queue_priority_hint = queue_prio(execlists); - GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n", - engine->name, execlists->queue_priority_hint, - yesno(submit)); if (submit) { *port = execlists_schedule_in(last, port - execlists->pending); @@ -1757,6 +1999,8 @@ done: memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); + + set_preempt_timeout(engine); } else { skip_submit: ring_set_paused(engine, 0); @@ -1766,16 +2010,17 @@ skip_submit: static void cancel_port_requests(struct intel_engine_execlists * const execlists) { - struct i915_request * const *port, *rq; + struct i915_request * const *port; - for (port = execlists->pending; (rq = *port); port++) - execlists_schedule_out(rq); + for (port = execlists->pending; *port; port++) + execlists_schedule_out(*port); memset(execlists->pending, 0, sizeof(execlists->pending)); - for (port = execlists->active; (rq = *port); port++) - execlists_schedule_out(rq); - execlists->active = - memset(execlists->inflight, 0, sizeof(execlists->inflight)); + /* Mark the end of active before we overwrite *active */ + for (port = xchg(&execlists->active, execlists->pending); *port; port++) + execlists_schedule_out(*port); + WRITE_ONCE(execlists->active, + memset(execlists->inflight, 0, sizeof(execlists->inflight))); } static inline void @@ -1867,7 +2112,7 @@ static void process_csb(struct intel_engine_cs *engine) */ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && !reset_in_progress(execlists)); - GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915)); + GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); /* * Note that csb_write, csb_status may be either in HWSP or mmio. @@ -1881,7 +2126,7 @@ static void process_csb(struct intel_engine_cs *engine) */ head = execlists->csb_head; tail = READ_ONCE(*execlists->csb_write); - GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); + ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); if (unlikely(head == tail)) return; @@ -1919,35 +2164,35 @@ static void process_csb(struct intel_engine_cs *engine) * status notifier. 
*/ - GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n", - engine->name, head, - buf[2 * head + 0], buf[2 * head + 1]); + ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", + head, buf[2 * head + 0], buf[2 * head + 1]); if (INTEL_GEN(engine->i915) >= 12) promote = gen12_csb_parse(execlists, buf + 2 * head); else promote = gen8_csb_parse(execlists, buf + 2 * head); if (promote) { + struct i915_request * const *old = execlists->active; + + /* Point active to the new ELSP; prevent overwriting */ + WRITE_ONCE(execlists->active, execlists->pending); + set_timeslice(engine); + if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); /* cancel old inflight, prepare for switch */ - trace_ports(execlists, "preempted", execlists->active); - while (*execlists->active) - execlists_schedule_out(*execlists->active++); + trace_ports(execlists, "preempted", old); + while (*old) + execlists_schedule_out(*old++); /* switch pending to inflight */ GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); - execlists->active = - memcpy(execlists->inflight, - execlists->pending, - execlists_num_ports(execlists) * - sizeof(*execlists->pending)); - - if (enable_timeslice(execlists)) - mod_timer(&execlists->timer, jiffies + 1); - else - cancel_timer(&execlists->timer); + WRITE_ONCE(execlists->active, + memcpy(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists) * + sizeof(*execlists->pending))); WRITE_ONCE(execlists->pending[0], NULL); } else { @@ -1997,6 +2242,42 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) } } +static noinline void preempt_reset(struct intel_engine_cs *engine) +{ + const unsigned int bit = I915_RESET_ENGINE + engine->id; + unsigned long *lock = &engine->gt->reset.flags; + + if (i915_modparams.reset < 3) + return; + + if (test_and_set_bit(bit, lock)) + return; + + /* Mark this tasklet as disabled to avoid waiting for it to complete */ + tasklet_disable_nosync(&engine->execlists.tasklet); + + ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n", + READ_ONCE(engine->props.preempt_timeout_ms), + jiffies_to_msecs(jiffies - engine->execlists.preempt.expires)); + intel_engine_reset(engine, "preemption time out"); + + tasklet_enable(&engine->execlists.tasklet); + clear_and_wake_up_bit(bit, lock); +} + +static bool preempt_timeout(const struct intel_engine_cs *const engine) +{ + const struct timer_list *t = &engine->execlists.preempt; + + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) + return false; + + if (!timer_expired(t)) + return false; + + return READ_ONCE(engine->execlists.pending[0]); +} + /* * Check the unread Context Status Buffers and manage the submission of new * contexts to the ELSP accordingly. 
@@ -2004,23 +2285,39 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) static void execlists_submission_tasklet(unsigned long data) { struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; - unsigned long flags; + bool timeout = preempt_timeout(engine); process_csb(engine); - if (!READ_ONCE(engine->execlists.pending[0])) { + if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { + unsigned long flags; + spin_lock_irqsave(&engine->active.lock, flags); __execlists_submission_tasklet(engine); spin_unlock_irqrestore(&engine->active.lock, flags); + + /* Recheck after serialising with direct-submission */ + if (timeout && preempt_timeout(engine)) + preempt_reset(engine); } } -static void execlists_submission_timer(struct timer_list *timer) +static void __execlists_kick(struct intel_engine_execlists *execlists) { - struct intel_engine_cs *engine = - from_timer(engine, timer, execlists.timer); - /* Kick the tasklet for some interrupt coalescing and reset handling */ - tasklet_hi_schedule(&engine->execlists.tasklet); + tasklet_hi_schedule(&execlists->tasklet); +} + +#define execlists_kick(t, member) \ + __execlists_kick(container_of(t, struct intel_engine_execlists, member)) + +static void execlists_timeslice(struct timer_list *timer) +{ + execlists_kick(timer, timer); +} + +static void execlists_preempt(struct timer_list *timer) +{ + execlists_kick(timer, preempt); } static void queue_request(struct intel_engine_cs *engine, @@ -2100,7 +2397,6 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine) if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) return; - vaddr += LRC_HEADER_PAGES * PAGE_SIZE; vaddr += engine->context_size; memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); @@ -2112,7 +2408,6 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) return; - vaddr += LRC_HEADER_PAGES * PAGE_SIZE; vaddr += engine->context_size; if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) @@ -2140,7 +2435,7 @@ __execlists_update_reg_state(const struct intel_context *ce, GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); - regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma); + regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); regs[CTX_RING_HEAD] = ring->head; regs[CTX_RING_TAIL] = ring->tail; @@ -2268,7 +2563,7 @@ static int execlists_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); + GEM_BUG_ON(!intel_context_is_pinned(request->context)); /* * Flush enough space to reduce the likelihood of waiting after @@ -2669,8 +2964,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) struct intel_engine_execlists * const execlists = &engine->execlists; unsigned long flags; - GEM_TRACE("%s: depth<-%d\n", engine->name, - atomic_read(&execlists->tasklet.count)); + ENGINE_TRACE(engine, "depth<-%d\n", + atomic_read(&execlists->tasklet.count)); /* * Prevent request submission to the hardware until we have @@ -2723,41 +3018,28 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) WRITE_ONCE(*execlists->csb_write, reset_value); wmb(); /* Make sure this is visible to HW (paranoia?) */ + /* + * Sometimes Icelake forgets to reset its pointers on a GPU reset. + * Bludgeon them with a mmio update to be sure. 
+ */ + ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, + reset_value << 8 | reset_value); + ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); + invalidate_csb_entries(&execlists->csb_status[0], &execlists->csb_status[reset_value]); } -static struct i915_request *active_request(struct i915_request *rq) -{ - const struct intel_context * const ce = rq->hw_context; - struct i915_request *active = NULL; - struct list_head *list; - - if (!i915_request_is_active(rq)) /* unwound, but incomplete! */ - return rq; - - list = &i915_request_active_timeline(rq)->requests; - list_for_each_entry_from_reverse(rq, list, link) { - if (i915_request_completed(rq)) - break; - - if (rq->hw_context != ce) - break; - - active = rq; - } - - return active; -} - static void __execlists_reset_reg_state(const struct intel_context *ce, const struct intel_engine_cs *engine) { u32 *regs = ce->lrc_reg_state; + int x; - if (INTEL_GEN(engine->i915) >= 9) { - regs[GEN9_CTX_RING_MI_MODE + 1] &= ~STOP_RING; - regs[GEN9_CTX_RING_MI_MODE + 1] |= STOP_RING << 16; + x = lrc_ring_mi_mode(engine); + if (x != -1) { + regs[x + 1] &= ~STOP_RING; + regs[x + 1] |= STOP_RING << 16; } } @@ -2766,7 +3048,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_context *ce; struct i915_request *rq; - u32 *regs; mb(); /* paranoia: read the CSB pointers from after the reset */ clflush(execlists->csb_write); @@ -2789,22 +3070,20 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) /* We still have requests in-flight; the engine should be active */ GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); - ce = rq->hw_context; + ce = rq->context; GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); - /* Proclaim we have exclusive access to the context image! */ - __context_pin_acquire(ce); - - rq = active_request(rq); - if (!rq) { + if (i915_request_completed(rq)) { /* Idle context; tidy up the ring so we can restart afresh */ - ce->ring->head = ce->ring->tail; + ce->ring->head = intel_ring_wrap(ce->ring, rq->tail); goto out_replay; } /* Context has requests still in-flight; it should not be idle! */ GEM_BUG_ON(i915_active_is_idle(&ce->active)); + rq = active_request(ce->timeline, rq); ce->ring->head = intel_ring_wrap(ce->ring, rq->head); + GEM_BUG_ON(ce->ring->head == ce->ring->tail); /* * If this request hasn't started yet, e.g. it is waiting on a @@ -2845,22 +3124,15 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * to recreate its own state. */ GEM_BUG_ON(!intel_context_is_pinned(ce)); - regs = ce->lrc_reg_state; - if (engine->pinned_default_state) { - memcpy(regs, /* skip restoring the vanilla PPHWSP */ - engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, - engine->context_size - PAGE_SIZE); - } - execlists_init_reg_state(regs, ce, engine, ce->ring, false); + restore_default_state(ce, engine); out_replay: - GEM_TRACE("%s replay {head:%04x, tail:%04x\n", - engine->name, ce->ring->head, ce->ring->tail); + ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", + ce->ring->head, ce->ring->tail); intel_ring_update_space(ce->ring); __execlists_reset_reg_state(ce, engine); __execlists_update_reg_state(ce, engine); ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ - __context_pin_release(ce); unwind: /* Push back any incomplete requests for replay after the reset. 
*/ @@ -2868,11 +3140,11 @@ unwind: __unwind_incomplete_requests(engine); } -static void execlists_reset(struct intel_engine_cs *engine, bool stalled) +static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) { unsigned long flags; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); spin_lock_irqsave(&engine->active.lock, flags); @@ -2886,14 +3158,14 @@ static void nop_submission_tasklet(unsigned long data) /* The driver is wedged; don't process any more events. */ } -static void execlists_cancel_requests(struct intel_engine_cs *engine) +static void execlists_reset_cancel(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_request *rq, *rn; struct rb_node *rb; unsigned long flags; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); /* * Before we call engine->cancel_requests(), we should have exclusive @@ -2980,13 +3252,13 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) if (__tasklet_enable(&execlists->tasklet)) /* And kick in case we missed a new request submission. */ tasklet_hi_schedule(&execlists->tasklet); - GEM_TRACE("%s: depth->%d\n", engine->name, - atomic_read(&execlists->tasklet.count)); + ENGINE_TRACE(engine, "depth->%d\n", + atomic_read(&execlists->tasklet.count)); } -static int gen8_emit_bb_start(struct i915_request *rq, - u64 offset, u32 len, - const unsigned int flags) +static int gen8_emit_bb_start_noarb(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) { u32 *cs; @@ -3020,7 +3292,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, return 0; } -static int gen9_emit_bb_start(struct i915_request *rq, +static int gen8_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) { @@ -3469,17 +3741,18 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) static void execlists_park(struct intel_engine_cs *engine) { cancel_timer(&engine->execlists.timer); + cancel_timer(&engine->execlists.preempt); } void intel_execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; - engine->cancel_requests = execlists_cancel_requests; engine->schedule = i915_schedule; engine->execlists.tasklet.func = execlists_submission_tasklet; engine->reset.prepare = execlists_reset_prepare; - engine->reset.reset = execlists_reset; + engine->reset.rewind = execlists_reset_rewind; + engine->reset.cancel = execlists_reset_cancel; engine->reset.finish = execlists_reset_finish; engine->park = execlists_park; @@ -3494,13 +3767,27 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) if (INTEL_GEN(engine->i915) >= 12) engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; + + if (intel_engine_has_preemption(engine)) + engine->emit_bb_start = gen8_emit_bb_start; + else + engine->emit_bb_start = gen8_emit_bb_start_noarb; } -static void execlists_destroy(struct intel_engine_cs *engine) +static void execlists_shutdown(struct intel_engine_cs *engine) { + /* Synchronise with residual timers and any softirq they raise */ + del_timer_sync(&engine->execlists.timer); + del_timer_sync(&engine->execlists.preempt); + tasklet_kill(&engine->execlists.tasklet); +} + +static void execlists_release(struct intel_engine_cs *engine) +{ + execlists_shutdown(engine); + intel_engine_cleanup_common(engine); lrc_destroy_wa_ctx(engine); - kfree(engine); } static void @@ -3508,13 +3795,9 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* 
Default vfuncs which can be overriden by each engine. */ - engine->destroy = execlists_destroy; + engine->release = execlists_release; engine->resume = execlists_resume; - engine->reset.prepare = execlists_reset_prepare; - engine->reset.reset = execlists_reset; - engine->reset.finish = execlists_reset_finish; - engine->cops = &execlists_context_ops; engine->request_alloc = execlists_request_alloc; @@ -3537,10 +3820,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) * until a more refined solution exists. */ } - if (IS_GEN(engine->i915, 8)) - engine->emit_bb_start = gen8_emit_bb_start; - else - engine->emit_bb_start = gen9_emit_bb_start; } static inline void @@ -3584,9 +3863,15 @@ static void rcs_submission_override(struct intel_engine_cs *engine) int intel_execlists_submission_setup(struct intel_engine_cs *engine) { + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; + u32 base = engine->mmio_base; + tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); - timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); + timer_setup(&engine->execlists.timer, execlists_timeslice, 0); + timer_setup(&engine->execlists.preempt, execlists_preempt, 0); logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); @@ -3594,21 +3879,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) if (engine->class == RENDER_CLASS) rcs_submission_override(engine); - return 0; -} - -int intel_execlists_submission_init(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct drm_i915_private *i915 = engine->i915; - struct intel_uncore *uncore = engine->uncore; - u32 base = engine->mmio_base; - int ret; - - ret = intel_engine_init_common(engine); - if (ret) - return ret; - if (intel_init_workaround_bb(engine)) /* * We continue even if we fail to initialize WA batch @@ -3689,7 +3959,7 @@ static void init_common_reg_state(u32 * const regs, _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); - regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID; + regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; regs[CTX_BB_STATE] = RING_BB_PPGTT; } @@ -3796,12 +4066,6 @@ populate_lr_context(struct intel_context *ce, set_redzone(vaddr, engine); if (engine->default_state) { - /* - * We only want to copy over the template context state; - * skipping over the headers reserved for GuC communication, - * leaving those as zero. 
- */ - const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE; void *defaults; defaults = i915_gem_object_pin_map(engine->default_state, @@ -3811,8 +4075,9 @@ populate_lr_context(struct intel_context *ce, goto err_unpin_ctx; } - memcpy(vaddr + start, defaults + start, engine->context_size); + memcpy(vaddr, defaults, engine->context_size); i915_gem_object_unpin_map(engine->default_state); + __set_bit(CONTEXT_VALID_BIT, &ce->flags); inhibit = false; } @@ -3826,9 +4091,7 @@ populate_lr_context(struct intel_context *ce, ret = 0; err_unpin_ctx: - __i915_gem_object_flush_map(ctx_obj, - LRC_HEADER_PAGES * PAGE_SIZE, - engine->context_size); + __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); i915_gem_object_unpin_map(ctx_obj); return ret; } @@ -3845,11 +4108,6 @@ static int __execlists_context_alloc(struct intel_context *ce, GEM_BUG_ON(ce->state); context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); - /* - * Before the actual start of the context image, we insert a few pages - * for our own use and for sharing with the GuC. - */ - context_size += LRC_HEADER_PAGES * PAGE_SIZE; if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) context_size += I915_GTT_PAGE_SIZE; /* for redzone */ @@ -3917,17 +4175,18 @@ static void virtual_context_destroy(struct kref *kref) for (n = 0; n < ve->num_siblings; n++) { struct intel_engine_cs *sibling = ve->siblings[n]; struct rb_node *node = &ve->nodes[sibling->id].rb; + unsigned long flags; if (RB_EMPTY_NODE(node)) continue; - spin_lock_irq(&sibling->active.lock); + spin_lock_irqsave(&sibling->active.lock, flags); /* Detachment is lazily performed in the execlists tasklet */ if (!RB_EMPTY_NODE(node)) rb_erase_cached(node, &sibling->execlists.virtual); - spin_unlock_irq(&sibling->active.lock); + spin_unlock_irqrestore(&sibling->active.lock, flags); } GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); @@ -3966,6 +4225,13 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve) ve->siblings[0]); } +static int virtual_context_alloc(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + + return __execlists_context_alloc(ce, ve->siblings[0]); +} + static int virtual_context_pin(struct intel_context *ce) { struct virtual_engine *ve = container_of(ce, typeof(*ve), context); @@ -4003,6 +4269,8 @@ static void virtual_context_exit(struct intel_context *ce) } static const struct intel_context_ops virtual_context_ops = { + .alloc = virtual_context_alloc, + .pin = virtual_context_pin, .unpin = execlists_context_unpin, @@ -4029,10 +4297,9 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) mask = ve->siblings[0]->mask; } - GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n", - ve->base.name, - rq->fence.context, rq->fence.seqno, - mask, ve->base.execlists.queue_priority_hint); + ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n", + rq->fence.context, rq->fence.seqno, + mask, ve->base.execlists.queue_priority_hint); return mask; } @@ -4123,10 +4390,9 @@ static void virtual_submit_request(struct i915_request *rq) struct i915_request *old; unsigned long flags; - GEM_TRACE("%s: rq=%llx:%lld\n", - ve->base.name, - rq->fence.context, - rq->fence.seqno); + ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n", + rq->fence.context, + rq->fence.seqno); GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); @@ -4194,8 +4460,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) } struct intel_context * -intel_execlists_create_virtual(struct 
i915_gem_context *ctx, - struct intel_engine_cs **siblings, +intel_execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count) { struct virtual_engine *ve; @@ -4206,13 +4471,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, return ERR_PTR(-EINVAL); if (count == 1) - return intel_context_create(ctx, siblings[0]); + return intel_context_create(siblings[0]); ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); if (!ve) return ERR_PTR(-ENOMEM); - ve->base.i915 = ctx->i915; + ve->base.i915 = siblings[0]->i915; ve->base.gt = siblings[0]->gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; @@ -4239,7 +4504,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_breadcrumbs(&ve->base); - intel_engine_init_execlists(&ve->base); ve->base.cops = &virtual_context_ops; @@ -4255,7 +4519,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, virtual_submission_tasklet, (unsigned long)ve); - intel_context_init(&ve->context, ctx, &ve->base); + intel_context_init(&ve->context, &ve->base); for (n = 0; n < count; n++) { struct intel_engine_cs *sibling = siblings[n]; @@ -4322,12 +4586,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.flags |= I915_ENGINE_IS_VIRTUAL; - err = __execlists_context_alloc(&ve->context, siblings[0]); - if (err) - goto err_put; - - __set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags); - return &ve->context; err_put: @@ -4336,14 +4594,12 @@ err_put: } struct intel_context * -intel_execlists_clone_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs *src) +intel_execlists_clone_virtual(struct intel_engine_cs *src) { struct virtual_engine *se = to_virtual_engine(src); struct intel_context *dst; - dst = intel_execlists_create_virtual(ctx, - se->siblings, + dst = intel_execlists_create_virtual(se->siblings, se->num_siblings); if (IS_ERR(dst)) return dst; @@ -4502,7 +4758,6 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, bool scrub) { GEM_BUG_ON(!intel_context_is_pinned(ce)); - __context_pin_acquire(ce); /* * We want a simple context + ring to execute the breadcrumb update. @@ -4512,23 +4767,21 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. */ - if (scrub) { - u32 *regs = ce->lrc_reg_state; - - if (engine->pinned_default_state) { - memcpy(regs, /* skip restoring the vanilla PPHWSP */ - engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, - engine->context_size - PAGE_SIZE); - } - execlists_init_reg_state(regs, ce, engine, ce->ring, false); - } + if (scrub) + restore_default_state(ce, engine); /* Rerun the request; its payload has been neutered (if guilty). 
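The open-coded scrub removed above is folded into restore_default_state(); judging only from the lines it replaces, the helper plausibly amounts to the following sketch (its real definition is outside this hunk):

	static void restore_default_state(struct intel_context *ce,
					  struct intel_engine_cs *engine)
	{
		u32 *regs = ce->lrc_reg_state;

		if (engine->pinned_default_state)
			memcpy(regs, /* skip restoring the vanilla PPHWSP */
			       engine->pinned_default_state +
			       LRC_STATE_PN * PAGE_SIZE,
			       engine->context_size - PAGE_SIZE);

		execlists_init_reg_state(regs, ce, engine, ce->ring, false);
	}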
*/ ce->ring->head = head; intel_ring_update_space(ce->ring); __execlists_update_reg_state(ce, engine); - __context_pin_release(ce); +} + +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine) +{ + return engine->set_default_submission == + intel_execlists_set_default_submission; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index 99dc576a4e25..dfbc214e14f5 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -43,6 +43,7 @@ struct intel_engine_cs; #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) #define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2) +#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8) #define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) #define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) #define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) @@ -82,34 +83,14 @@ enum { void intel_logical_ring_cleanup(struct intel_engine_cs *engine); int intel_execlists_submission_setup(struct intel_engine_cs *engine); -int intel_execlists_submission_init(struct intel_engine_cs *engine); /* Logical Ring Contexts */ - -/* - * We allocate a header at the start of the context image for our own - * use, therefore the actual location of the logical state is offset - * from the start of the VMA. The layout is - * - * | [guc] | [hwsp] [logical state] | - * |<- our header ->|<- context image ->| - * - */ -/* The first page is used for sharing data with the GuC */ -#define LRC_GUCSHR_PN (0) -#define LRC_GUCSHR_SZ (1) /* At the start of the context image is its per-process HWS page */ -#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ) +#define LRC_PPHWSP_PN (0) #define LRC_PPHWSP_SZ (1) -/* Finally we have the logical state for the context */ +/* After the PPHWSP we have the logical state for the context */ #define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) -/* - * Currently we include the PPHWSP in __intel_engine_context_size() so - * the size of the header is synonymous with the start of the PPHWSP. 
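For orientation, the offsets implied by the new defines, as a hypothetical helper (assuming vaddr is the pinned context mapping):

	/* Page 0 is the per-process HWSP; the register state follows it. */
	static inline u32 *lrc_reg_state(void *vaddr)
	{
		return vaddr + LRC_STATE_PN * PAGE_SIZE; /* == PAGE_SIZE */
	}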
- */ -#define LRC_HEADER_PAGES LRC_PPHWSP_PN - /* Space within PPHWSP reserved to be used as scratch */ #define LRC_PPHWSP_SCRATCH 0x34 #define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32)) @@ -129,13 +110,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine, unsigned int max); struct intel_context * -intel_execlists_create_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs **siblings, +intel_execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count); struct intel_context * -intel_execlists_clone_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs *src); +intel_execlists_clone_virtual(struct intel_engine_cs *src); int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, const struct intel_engine_cs *master, @@ -145,4 +124,7 @@ struct intel_engine_cs * intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, unsigned int sibling); +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine); + #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index 06ab0276e10e..08a3be65f700 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -13,8 +13,8 @@ #define CTX_CONTEXT_CONTROL (0x02 + 1) #define CTX_RING_HEAD (0x04 + 1) #define CTX_RING_TAIL (0x06 + 1) -#define CTX_RING_BUFFER_START (0x08 + 1) -#define CTX_RING_BUFFER_CONTROL (0x0a + 1) +#define CTX_RING_START (0x08 + 1) +#define CTX_RING_CTL (0x0a + 1) #define CTX_BB_STATE (0x10 + 1) #define CTX_BB_PER_CTX_PTR (0x18 + 1) #define CTX_PDP3_UDW (0x24 + 1) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 5bac3966906b..893249ea48d4 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -26,6 +26,7 @@ #include "intel_gt.h" #include "intel_mocs.h" #include "intel_lrc.h" +#include "intel_ring.h" /* structures required */ struct drm_i915_mocs_entry { @@ -199,14 +200,6 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { MOCS_ENTRY(15, \ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \ L3_3_WB), \ - /* Bypass LLC - Uncached (EHL+) */ \ - MOCS_ENTRY(16, \ - LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \ - L3_1_UC), \ - /* Bypass LLC - L3 (Read-Only) (EHL+) */ \ - MOCS_ENTRY(17, \ - LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \ - L3_3_WB), \ /* Self-Snoop - L3 + LLC */ \ MOCS_ENTRY(18, \ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \ @@ -270,7 +263,7 @@ static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { L3_1_UC), /* HW Special Case (Displayable) */ MOCS_ENTRY(61, - LE_1_UC | LE_TC_1_LLC | LE_SCF(1), + LE_1_UC | LE_TC_1_LLC, L3_3_WB), }; @@ -290,65 +283,42 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { static bool get_mocs_settings(const struct drm_i915_private *i915, struct drm_i915_mocs_table *table) { - bool result = false; - if (INTEL_GEN(i915) >= 12) { table->size = ARRAY_SIZE(tigerlake_mocs_table); table->table = tigerlake_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; - result = true; } else if (IS_GEN(i915, 11)) { table->size = ARRAY_SIZE(icelake_mocs_table); table->table = icelake_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; - result = true; } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { table->size = ARRAY_SIZE(skylake_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = skylake_mocs_table; - result = true; } else if (IS_GEN9_LP(i915)) { table->size 
= ARRAY_SIZE(broxton_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = broxton_mocs_table; - result = true; } else { WARN_ONCE(INTEL_GEN(i915) >= 9, "Platform that should have a MOCS table does not.\n"); + return false; } + if (GEM_DEBUG_WARN_ON(table->size > table->n_entries)) + return false; + /* WaDisableSkipCaching:skl,bxt,kbl,glk */ if (IS_GEN(i915, 9)) { int i; for (i = 0; i < table->size; i++) - if (WARN_ON(table->table[i].l3cc_value & - (L3_ESC(1) | L3_SCC(0x7)))) + if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value & + (L3_ESC(1) | L3_SCC(0x7)))) return false; } - return result; -} - -static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index) -{ - switch (engine->id) { - case RCS0: - return GEN9_GFX_MOCS(index); - case VCS0: - return GEN9_MFX0_MOCS(index); - case BCS0: - return GEN9_BLT_MOCS(index); - case VECS0: - return GEN9_VEBOX_MOCS(index); - case VCS1: - return GEN9_MFX1_MOCS(index); - case VCS2: - return GEN11_MFX2_MOCS(index); - default: - MISSING_CASE(engine->id); - return INVALID_MMIO_REG; - } + return true; } /* @@ -358,29 +328,47 @@ static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index) static u32 get_entry_control(const struct drm_i915_mocs_table *table, unsigned int index) { - if (table->table[index].used) + if (index < table->size && table->table[index].used) return table->table[index].control_value; return table->table[I915_MOCS_PTE].control_value; } -static void init_mocs_table(struct intel_engine_cs *engine, - const struct drm_i915_mocs_table *table) +#define for_each_mocs(mocs, t, i) \ + for (i = 0; \ + i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\ + i++) + +static void __init_mocs_table(struct intel_uncore *uncore, + const struct drm_i915_mocs_table *table, + u32 addr) { - struct intel_uncore *uncore = engine->uncore; - u32 unused_value = table->table[I915_MOCS_PTE].control_value; unsigned int i; + u32 mocs; + + for_each_mocs(mocs, table, i) + intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs); +} - for (i = 0; i < table->size; i++) - intel_uncore_write_fw(uncore, - mocs_register(engine, i), - get_entry_control(table, i)); +static u32 mocs_offset(const struct intel_engine_cs *engine) +{ + static const u32 offset[] = { + [RCS0] = __GEN9_RCS0_MOCS0, + [VCS0] = __GEN9_VCS0_MOCS0, + [VCS1] = __GEN9_VCS1_MOCS0, + [VECS0] = __GEN9_VECS0_MOCS0, + [BCS0] = __GEN9_BCS0_MOCS0, + [VCS2] = __GEN11_VCS2_MOCS0, + }; + + GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset)); + return offset[engine->id]; +} - /* All remaining entries are unused */ - for (; i < table->n_entries; i++) - intel_uncore_write_fw(uncore, - mocs_register(engine, i), - unused_value); +static void init_mocs_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table) +{ + __init_mocs_table(engine->uncore, table, mocs_offset(engine)); } /* @@ -390,51 +378,34 @@ static void init_mocs_table(struct intel_engine_cs *engine, static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table, unsigned int index) { - if (table->table[index].used) + if (index < table->size && table->table[index].used) return table->table[index].l3cc_value; return table->table[I915_MOCS_PTE].l3cc_value; } -static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, - u16 low, - u16 high) +static inline u32 l3cc_combine(u16 low, u16 high) { return low | (u32)high << 16; } +#define for_each_l3cc(l3cc, t, i) \ + for (i = 0; \ + i < ((t)->n_entries + 1) / 2 ? 
\ + (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \ + get_entry_l3cc((t), 2 * i + 1))), 1 : \ + 0; \ + i++) + static void init_l3cc_table(struct intel_engine_cs *engine, const struct drm_i915_mocs_table *table) { struct intel_uncore *uncore = engine->uncore; - u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value; unsigned int i; + u32 l3cc; - for (i = 0; i < table->size / 2; i++) { - u16 low = get_entry_l3cc(table, 2 * i); - u16 high = get_entry_l3cc(table, 2 * i + 1); - - intel_uncore_write(uncore, - GEN9_LNCFCMOCS(i), - l3cc_combine(table, low, high)); - } - - /* Odd table size - 1 left over */ - if (table->size & 1) { - u16 low = get_entry_l3cc(table, 2 * i); - - intel_uncore_write(uncore, - GEN9_LNCFCMOCS(i), - l3cc_combine(table, low, unused_value)); - i++; - } - - /* All remaining entries are also unused */ - for (; i < table->n_entries / 2; i++) - intel_uncore_write(uncore, - GEN9_LNCFCMOCS(i), - l3cc_combine(table, unused_value, - unused_value)); + for_each_l3cc(l3cc, table, i) + intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), l3cc); } void intel_mocs_init_engine(struct intel_engine_cs *engine) @@ -455,38 +426,33 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) init_l3cc_table(engine, &table); } -static void intel_mocs_init_global(struct intel_gt *gt) +static u32 global_mocs_offset(void) { - struct intel_uncore *uncore = gt->uncore; - struct drm_i915_mocs_table table; - unsigned int index; + return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0)); +} - GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)); +static void init_global_mocs(struct intel_gt *gt) +{ + struct drm_i915_mocs_table table; - if (!get_mocs_settings(gt->i915, &table)) + /* + * LLC and eDRAM control values are not applicable to dgfx + */ + if (IS_DGFX(gt->i915)) return; - if (GEM_DEBUG_WARN_ON(table.size > table.n_entries)) + if (!get_mocs_settings(gt->i915, &table)) return; - for (index = 0; index < table.size; index++) - intel_uncore_write(uncore, - GEN12_GLOBAL_MOCS(index), - table.table[index].control_value); - - /* - * Ok, now set the unused entries to the invalid entry (index 0). These - * entries are officially undefined and no contract for the contents and - * settings is given for these entries. - */ - for (; index < table.n_entries; index++) - intel_uncore_write(uncore, - GEN12_GLOBAL_MOCS(index), - table.table[0].control_value); + __init_mocs_table(gt->uncore, &table, global_mocs_offset()); } void intel_mocs_init(struct intel_gt *gt) { if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) - intel_mocs_init_global(gt); + init_global_mocs(gt); } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_mocs.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 70f0e01a38b9..9e303c29d6e3 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -88,21 +88,18 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) * do not want the enable hysteresis to be less than the wakeup latency. * * igt/gem_exec_nop/sequential provides a rough estimate for the - * service latency, and puts it around 10us for Broadwell (and other - * big core) and around 40us for Broxton (and other low power cores). - * [Note that for legacy ringbuffer submission, this is less than 1us!] - * However, the wakeup latency on Broxton is closer to 100us. To be - * conservative, we have to factor in a context switch on top (due - * to ksoftirqd).
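Stepping back to the MOCS rework above: each 32-bit LNCFCMOCS register packs two 16-bit L3 cacheability entries, which is what the for_each_l3cc() pairing expresses. Unrolled into a plain loop, the programming is roughly this sketch:

	static void write_l3cc(struct intel_uncore *uncore,
			       const struct drm_i915_mocs_table *table)
	{
		unsigned int i;

		/* Rounds up: out-of-range indices decay to the PTE entry. */
		for (i = 0; i < (table->n_entries + 1) / 2; i++) {
			u32 val = get_entry_l3cc(table, 2 * i) |
				  (u32)get_entry_l3cc(table, 2 * i + 1) << 16;

			intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), val);
		}
	}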
+ * service latency, and puts it under 10us for Icelake, similar to + * Broadwell+. To be conservative, we want to factor in a context + * switch on top (due to ksoftirqd). */ - set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); - set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60); /* 3a: Enable RC6 */ - set(uncore, GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - GEN6_RC_CTL_EI_MODE(1)); + rc6->ctl_enable = + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1); set(uncore, GEN9_PG_ENABLE, GEN9_RENDER_PG_ENABLE | @@ -173,13 +170,18 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6) else rc6_mode = GEN6_RC_CTL_EI_MODE(1); - set(uncore, GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - rc6_mode); + rc6->ctl_enable = + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + rc6_mode; - set(uncore, GEN9_PG_ENABLE, - GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); + /* + * WaRsDisableCoarsePowerGating:skl,cnl + * - Render/Media PG need to be disabled with RC6. + */ + if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6))) + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); } static void gen8_rc6_enable(struct intel_rc6 *rc6) @@ -198,10 +200,10 @@ static void gen8_rc6_enable(struct intel_rc6 *rc6) set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ /* 3: Enable RC6 */ - set(uncore, GEN6_RC_CONTROL, + rc6->ctl_enable = GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | - GEN6_RC_CTL_RC6_ENABLE); + GEN6_RC_CTL_RC6_ENABLE; } static void gen6_rc6_enable(struct intel_rc6 *rc6) @@ -237,10 +239,10 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; if (HAS_RC6pp(i915)) rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; - set(uncore, GEN6_RC_CONTROL, + rc6->ctl_enable = rc6_mask | GEN6_RC_CTL_EI_MODE(1) | - GEN6_RC_CTL_HW_ENABLE); + GEN6_RC_CTL_HW_ENABLE; rc6vids = 0; ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, @@ -358,7 +360,7 @@ static void chv_rc6_enable(struct intel_rc6 *rc6) VLV_RENDER_RC6_COUNT_EN)); /* 3: Enable RC6 */ - set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE); + rc6->ctl_enable = GEN7_RC_CTL_TO_MODE; } static void vlv_rc6_enable(struct intel_rc6 *rc6) @@ -384,8 +386,8 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6) VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); - set(uncore, GEN6_RC_CONTROL, - GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); + rc6->ctl_enable = + GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; } static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) @@ -486,6 +488,21 @@ static void rpm_put(struct intel_rc6 *rc6) rc6->wakeref = false; } +static bool pctx_corrupted(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return false; + + if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO)) + return false; + + dev_notice(i915->drm.dev, + "RC6 context corruption, disabling runtime power management\n"); + return true; +} + static void __intel_rc6_disable(struct intel_rc6 *rc6) { struct drm_i915_private *i915 = rc6_to_i915(rc6); @@ -525,6 +542,11 @@ void intel_rc6_init(struct intel_rc6 *rc6) void intel_rc6_sanitize(struct intel_rc6 *rc6) { + if (rc6->enabled) { /* unbalanced suspend/resume */ + rpm_get(rc6); + rc6->enabled = false; + } + if (rc6->supported) __intel_rc6_disable(rc6); } @@ -554,13 +576,51 @@ void intel_rc6_enable(struct 
intel_rc6 *rc6) else if (INTEL_GEN(i915) >= 6) gen6_rc6_enable(rc6); + rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE; + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + rc6->ctl_enable = 0; + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + if (unlikely(pctx_corrupted(rc6))) + return; + /* rc6 is ready, runtime-pm is go! */ rpm_put(rc6); rc6->enabled = true; } +void intel_rc6_unpark(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + if (!rc6->enabled) + return; + + /* Restore HW timers for automatic RC6 entry while busy */ + set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable); +} + +void intel_rc6_park(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + if (!rc6->enabled) + return; + + if (unlikely(pctx_corrupted(rc6))) { + intel_rc6_disable(rc6); + return; + } + + if (!rc6->manual) + return; + + /* Turn off the HW timers and go directly to rc6 */ + set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE); + set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT); +} + void intel_rc6_disable(struct intel_rc6 *rc6) { if (!rc6->enabled) @@ -710,3 +770,7 @@ u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg) { return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000); } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_rc6.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h index 5e6711f36457..9f0f23fca8af 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.h +++ b/drivers/gpu/drm/i915/gt/intel_rc6.h @@ -15,6 +15,9 @@ struct intel_rc6; void intel_rc6_init(struct intel_rc6 *rc6); void intel_rc6_fini(struct intel_rc6 *rc6); +void intel_rc6_unpark(struct intel_rc6 *rc6); +void intel_rc6_park(struct intel_rc6 *rc6); + void intel_rc6_sanitize(struct intel_rc6 *rc6); void intel_rc6_enable(struct intel_rc6 *rc6); void intel_rc6_disable(struct intel_rc6 *rc6); diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h index 214f354d6ae4..bfbb623f7a4f 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h @@ -18,10 +18,13 @@ struct intel_rc6 { u64 prev_hw_residency[4]; u64 cur_residency[4]; + u32 ctl_enable; + struct drm_i915_gem_object *pctx; bool supported : 1; bool enabled : 1; + bool manual : 1; bool wakeref : 1; }; diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 6d05f9c64178..5954ecc3207f 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -27,16 +27,7 @@ #include "i915_drv.h" #include "intel_renderstate.h" - -struct intel_renderstate { - const struct intel_renderstate_rodata *rodata; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u32 batch_offset; - u32 batch_size; - u32 aux_offset; - u32 aux_size; -}; +#include "intel_ring.h" static const struct intel_renderstate_rodata * render_state_get_rodata(const struct intel_engine_cs *engine) @@ -83,11 +74,11 @@ static int render_state_setup(struct intel_renderstate *so, u32 *d; int ret; - ret = i915_gem_object_prepare_write(so->obj, &needs_clflush); + ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush); if (ret) return ret; - d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0)); + d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0)); while (i < rodata->batch_items) { u32 s = rodata->batch[i]; @@ -165,7 +156,7 @@ static int render_state_setup(struct intel_renderstate *so, 
ret = 0; out: - i915_gem_object_finish_access(so->obj); + i915_gem_object_finish_access(so->vma->obj); return ret; err: @@ -176,61 +167,84 @@ err: #undef OUT_BATCH -int intel_renderstate_emit(struct i915_request *rq) +int intel_renderstate_init(struct intel_renderstate *so, + struct intel_engine_cs *engine) { - struct intel_engine_cs *engine = rq->engine; - struct intel_renderstate so = {}; /* keep the compiler happy */ + struct drm_i915_gem_object *obj; int err; - so.rodata = render_state_get_rodata(engine); - if (!so.rodata) + memset(so, 0, sizeof(*so)); + + so->rodata = render_state_get_rodata(engine); + if (!so->rodata) return 0; - if (so.rodata->batch_items * 4 > PAGE_SIZE) + if (so->rodata->batch_items * 4 > PAGE_SIZE) return -EINVAL; - so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(so.obj)) - return PTR_ERR(so.obj); + obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); - so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL); - if (IS_ERR(so.vma)) { - err = PTR_ERR(so.vma); + so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(so->vma)) { + err = PTR_ERR(so->vma); goto err_obj; } - err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) goto err_vma; - err = render_state_setup(&so, rq->i915); + err = render_state_setup(so, engine->i915); if (err) goto err_unpin; + return 0; + +err_unpin: + i915_vma_unpin(so->vma); +err_vma: + i915_vma_close(so->vma); +err_obj: + i915_gem_object_put(obj); + so->vma = NULL; + return err; +} + +int intel_renderstate_emit(struct intel_renderstate *so, + struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + int err; + + if (!so->vma) + return 0; + err = engine->emit_bb_start(rq, - so.batch_offset, so.batch_size, + so->batch_offset, so->batch_size, I915_DISPATCH_SECURE); if (err) - goto err_unpin; + return err; - if (so.aux_size > 8) { + if (so->aux_size > 8) { err = engine->emit_bb_start(rq, - so.aux_offset, so.aux_size, + so->aux_offset, so->aux_size, I915_DISPATCH_SECURE); if (err) - goto err_unpin; + return err; } - i915_vma_lock(so.vma); - err = i915_request_await_object(rq, so.vma->obj, false); + i915_vma_lock(so->vma); + err = i915_request_await_object(rq, so->vma->obj, false); if (err == 0) - err = i915_vma_move_to_active(so.vma, rq, 0); - i915_vma_unlock(so.vma); -err_unpin: - i915_vma_unpin(so.vma); -err_vma: - i915_vma_close(so.vma); -err_obj: - i915_gem_object_put(so.obj); + err = i915_vma_move_to_active(so->vma, rq, 0); + i915_vma_unlock(so->vma); + return err; } + +void intel_renderstate_fini(struct intel_renderstate *so) +{ + i915_vma_unpin_and_release(&so->vma, 0); +} diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h index 8d5079145054..5700be69a05a 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.h +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h @@ -27,6 +27,8 @@ #include <linux/types.h> struct i915_request; +struct intel_engine_cs; +struct i915_vma; struct intel_renderstate_rodata { const u32 *reloc; @@ -46,6 +48,19 @@ extern const struct intel_renderstate_rodata gen7_null_state; extern const struct intel_renderstate_rodata gen8_null_state; extern const struct intel_renderstate_rodata gen9_null_state; -int intel_renderstate_emit(struct i915_request *rq); +struct intel_renderstate { + const struct intel_renderstate_rodata *rodata; + struct i915_vma *vma; + u32 
batch_offset; + u32 batch_size; + u32 aux_offset; + u32 aux_size; +}; + +int intel_renderstate_init(struct intel_renderstate *so, + struct intel_engine_cs *engine); +int intel_renderstate_emit(struct intel_renderstate *so, + struct i915_request *rq); +void intel_renderstate_fini(struct intel_renderstate *so); #endif /* _INTEL_RENDERSTATE_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index bf8d1ed4b1d8..1c51296646e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -21,6 +21,7 @@ #include "intel_reset.h" #include "uc/intel_guc.h" +#include "uc/intel_guc_submission.h" #define RESET_MAX_RETRIES 3 @@ -40,27 +41,29 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) static void engine_skip_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct i915_gem_context *hung_ctx = rq->gem_context; + struct intel_context *hung_ctx = rq->context; if (!i915_request_is_active(rq)) return; lockdep_assert_held(&engine->active.lock); list_for_each_entry_continue(rq, &engine->active.requests, sched.link) - if (rq->gem_context == hung_ctx) + if (rq->context == hung_ctx) i915_request_skip(rq, -EIO); } -static void client_mark_guilty(struct drm_i915_file_private *file_priv, - const struct i915_gem_context *ctx) +static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) { - unsigned int score; + struct drm_i915_file_private *file_priv = ctx->file_priv; unsigned long prev_hang; + unsigned int score; + + if (IS_ERR_OR_NULL(file_priv)) + return; - if (i915_gem_context_is_banned(ctx)) + score = 0; + if (banned) score = I915_CLIENT_SCORE_CONTEXT_BAN; - else - score = 0; prev_hang = xchg(&file_priv->hang_timestamp, jiffies); if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) @@ -75,17 +78,38 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv, } } -static bool context_mark_guilty(struct i915_gem_context *ctx) +static bool mark_guilty(struct i915_request *rq) { + struct i915_gem_context *ctx; unsigned long prev_hang; bool banned; int i; + rcu_read_lock(); + ctx = rcu_dereference(rq->context->gem_context); + if (ctx && !kref_get_unless_zero(&ctx->ref)) + ctx = NULL; + rcu_read_unlock(); + if (!ctx) + return false; + + if (i915_gem_context_is_closed(ctx)) { + intel_context_set_banned(rq->context); + banned = true; + goto out; + } + atomic_inc(&ctx->guilty_count); /* Cool contexts are too cool to be banned! (Used for reset testing.) 
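A possible caller of the intel_renderstate_{init,emit,fini}() triplet introduced above, sketched on the assumption that setup happens before the request is constructed and teardown after emission; the function name is hypothetical:

	static int emit_null_state(struct intel_engine_cs *engine,
				   struct i915_request *rq)
	{
		struct intel_renderstate so;
		int err;

		err = intel_renderstate_init(&so, engine); /* pins the vma */
		if (err)
			return err;

		err = intel_renderstate_emit(&so, rq);

		intel_renderstate_fini(&so); /* unpin and release */
		return err;
	}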
*/ - if (!i915_gem_context_is_bannable(ctx)) - return false; + if (!i915_gem_context_is_bannable(ctx)) { + banned = false; + goto out; + } + + dev_notice(ctx->i915->drm.dev, + "%s context reset due to GPU hang\n", + ctx->name); /* Record the timestamp for the last N hangs */ prev_hang = ctx->hang_timestamp[0]; @@ -100,18 +124,25 @@ static bool context_mark_guilty(struct i915_gem_context *ctx) if (banned) { DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n", ctx->name, atomic_read(&ctx->guilty_count)); - i915_gem_context_set_banned(ctx); + intel_context_set_banned(rq->context); } - if (!IS_ERR_OR_NULL(ctx->file_priv)) - client_mark_guilty(ctx->file_priv, ctx); + client_mark_guilty(ctx, banned); +out: + i915_gem_context_put(ctx); return banned; } -static void context_mark_innocent(struct i915_gem_context *ctx) +static void mark_innocent(struct i915_request *rq) { - atomic_inc(&ctx->active_count); + struct i915_gem_context *ctx; + + rcu_read_lock(); + ctx = rcu_dereference(rq->context->gem_context); + if (ctx) + atomic_inc(&ctx->active_count); + rcu_read_unlock(); } void __i915_request_reset(struct i915_request *rq, bool guilty) @@ -124,14 +155,16 @@ void __i915_request_reset(struct i915_request *rq, bool guilty) GEM_BUG_ON(i915_request_completed(rq)); + rcu_read_lock(); /* protect the GEM context */ if (guilty) { i915_request_skip(rq, -EIO); - if (context_mark_guilty(rq->gem_context)) + if (mark_guilty(rq)) engine_skip_context(rq); } else { dma_fence_set_error(&rq->fence, -EAGAIN); - context_mark_innocent(rq->gem_context); + mark_innocent(rq); } + rcu_read_unlock(); } static bool i915_in_reset(struct pci_dev *pdev) @@ -647,7 +680,8 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) * GPU state upon resume, i.e. fail to restart after a reset. 
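The context lookup at the top of mark_guilty() above is worth restating in isolation: a full reference is taken only if the refcount has not already reached zero, so a concurrently-closing GEM context is treated as absent rather than resurrected. As a standalone sketch:

	static struct i915_gem_context *get_guilty_context(struct i915_request *rq)
	{
		struct i915_gem_context *ctx;

		rcu_read_lock();
		ctx = rcu_dereference(rq->context->gem_context);
		if (ctx && !kref_get_unless_zero(&ctx->ref))
			ctx = NULL; /* already being torn down */
		rcu_read_unlock();

		return ctx; /* caller puts the reference if non-NULL */
	}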
*/ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); - engine->reset.prepare(engine); + if (engine->reset.prepare) + engine->reset.prepare(engine); } static void revoke_mmaps(struct intel_gt *gt) @@ -667,8 +701,13 @@ static void revoke_mmaps(struct intel_gt *gt) continue; GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]); - node = &vma->obj->base.vma_node; + + if (!vma->mmo) + continue; + + node = &vma->mmo->vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, drm_vma_node_offset_addr(node) + vma_offset, vma->size, @@ -722,10 +761,11 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) static void reset_finish_engine(struct intel_engine_cs *engine) { - engine->reset.finish(engine); + if (engine->reset.finish) + engine->reset.finish(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); - intel_engine_breadcrumbs_irq(engine); + intel_engine_signal_breadcrumbs(engine); } static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) @@ -754,7 +794,7 @@ static void nop_submit_request(struct i915_request *request) i915_request_mark_complete(request); spin_unlock_irqrestore(&engine->active.lock, flags); - intel_engine_queue_breadcrumbs(engine); + intel_engine_signal_breadcrumbs(engine); } static void __intel_gt_set_wedged(struct intel_gt *gt) @@ -799,7 +839,8 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) /* Mark all executing requests as skipped */ for_each_engine(engine, gt, id) - engine->cancel_requests(engine); + if (engine->reset.cancel) + engine->reset.cancel(engine); reset_finish(gt, awake); @@ -820,7 +861,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) { struct intel_gt_timelines *timelines = &gt->timelines; struct intel_timeline *tl; - unsigned long flags; bool ok; if (!test_bit(I915_WEDGED, &gt->reset.flags)) return false; @@ -842,7 +882,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) * * No more can be submitted until we reset the wedged bit.
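The shape of the wait loop that follows, restated as a sketch; fetch_active_fence() stands in for the fence lookup elided by the hunk. The lock cannot be held across the sleep, so it is dropped around the wait and the walk restarts from the list head, since the list may have changed in the meantime:

	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = fetch_active_fence(tl); /* hypothetical helper */
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);
		dma_fence_wait(fence, false); /* may sleep */
		dma_fence_put(fence);

		/* Restart from the head: the list may have mutated. */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);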
*/ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); list_for_each_entry(tl, &timelines->active_list, link) { struct dma_fence *fence; @@ -850,7 +890,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) if (!fence) continue; - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); /* * All internal dependencies (i915_requests) will have @@ -863,10 +903,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) dma_fence_put(fence); /* Restart iteration after dropping lock */ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); tl = list_entry(&timelines->active_list, typeof(*tl), link); } - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); /* We must reset pending GPU events before restoring our submission */ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ @@ -1024,8 +1064,6 @@ void intel_gt_reset(struct intel_gt *gt, if (ret) goto taint; - intel_gt_queue_hangcheck(gt); - finish: reset_finish(gt, awake); unlock: @@ -1072,9 +1110,10 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) { struct intel_gt *gt = engine->gt; + bool uses_guc = intel_engine_in_guc_submission_mode(engine); int ret; - GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags); + ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); if (!intel_engine_pm_get_if_awake(engine)) @@ -1087,14 +1126,14 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) "Resetting %s for %s\n", engine->name, msg); atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); - if (!engine->gt->uc.guc.execbuf_client) + if (!uses_guc) ret = intel_gt_reset_engine(engine); else ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); if (ret) { /* If we fail here, we expect to fallback to a global reset */ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", - engine->gt->uc.guc.execbuf_client ? "GuC " : "", + uses_guc ? "GuC " : "", engine->name, ret); goto out; } @@ -1116,7 +1155,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) out: intel_engine_cancel_stop_cs(engine); reset_finish_engine(engine); - intel_engine_pm_put(engine); + intel_engine_pm_put_async(engine); return ret; } @@ -1353,4 +1392,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_reset.c" +#include "selftest_hangcheck.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c new file mode 100644 index 000000000000..374b28f13ca0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -0,0 +1,318 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "gem/i915_gem_object.h" +#include "i915_drv.h" +#include "i915_vma.h" +#include "intel_engine.h" +#include "intel_ring.h" +#include "intel_timeline.h" + +unsigned int intel_ring_update_space(struct intel_ring *ring) +{ + unsigned int space; + + space = __intel_ring_space(ring->head, ring->emit, ring->size); + + ring->space = space; + return space; +} + +int intel_ring_pin(struct intel_ring *ring) +{ + struct i915_vma *vma = ring->vma; + unsigned int flags; + void *addr; + int ret; + + if (atomic_fetch_inc(&ring->pin_count)) + return 0; + + flags = PIN_GLOBAL; + + /* Ring wraparound at offset 0 sometimes hangs.
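intel_ring_pin() and intel_ring_unpin() in the new file follow the usual pin-count protocol: only the 0 -> 1 transition does real work, and the error path must undo the increment so a later caller can retry. The protocol in isolation, with do_setup() as a hypothetical stand-in for the vma pinning and mapping:

	static int pin_once(atomic_t *pin_count)
	{
		int err;

		if (atomic_fetch_inc(pin_count)) /* already pinned */
			return 0;

		err = do_setup(); /* hypothetical: pin and map the vma */
		if (err)
			atomic_dec(pin_count); /* allow a retry */

		return err;
	}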
No idea why. */ + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + + if (vma->obj->stolen) + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; + + ret = i915_vma_pin(vma, 0, 0, flags); + if (unlikely(ret)) + goto err_unpin; + + if (i915_vma_is_map_and_fenceable(vma)) + addr = (void __force *)i915_vma_pin_iomap(vma); + else + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto err_ring; + } + + i915_vma_make_unshrinkable(vma); + + /* Discard any unused bytes beyond that submitted to hw. */ + intel_ring_reset(ring, ring->emit); + + ring->vaddr = addr; + return 0; + +err_ring: + i915_vma_unpin(vma); +err_unpin: + atomic_dec(&ring->pin_count); + return ret; +} + +void intel_ring_reset(struct intel_ring *ring, u32 tail) +{ + tail = intel_ring_wrap(ring, tail); + ring->tail = tail; + ring->head = tail; + ring->emit = tail; + intel_ring_update_space(ring); +} + +void intel_ring_unpin(struct intel_ring *ring) +{ + struct i915_vma *vma = ring->vma; + + if (!atomic_dec_and_test(&ring->pin_count)) + return; + + i915_vma_unset_ggtt_write(vma); + if (i915_vma_is_map_and_fenceable(vma)) + i915_vma_unpin_iomap(vma); + else + i915_gem_object_unpin_map(vma->obj); + + i915_vma_make_purgeable(vma); + i915_vma_unpin(vma); +} + +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = ERR_PTR(-ENODEV); + if (i915_ggtt_has_aperture(ggtt)) + obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(obj)) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. + */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size) +{ + struct drm_i915_private *i915 = engine->i915; + struct intel_ring *ring; + struct i915_vma *vma; + + GEM_BUG_ON(!is_power_of_2(size)); + GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + return ERR_PTR(-ENOMEM); + + kref_init(&ring->ref); + ring->size = size; + + /* + * Workaround an erratum on the i830 which causes a hang if + * the TAIL pointer points to within the last 2 cachelines + * of the buffer. 
+ */ + ring->effective_size = size; + if (IS_I830(i915) || IS_I845G(i915)) + ring->effective_size -= 2 * CACHELINE_BYTES; + + intel_ring_update_space(ring); + + vma = create_ring_vma(engine->gt->ggtt, size); + if (IS_ERR(vma)) { + kfree(ring); + return ERR_CAST(vma); + } + ring->vma = vma; + + return ring; +} + +void intel_ring_free(struct kref *ref) +{ + struct intel_ring *ring = container_of(ref, typeof(*ring), ref); + + i915_vma_put(ring->vma); + kfree(ring); +} + +static noinline int +wait_for_space(struct intel_ring *ring, + struct intel_timeline *tl, + unsigned int bytes) +{ + struct i915_request *target; + long timeout; + + if (intel_ring_update_space(ring) >= bytes) + return 0; + + GEM_BUG_ON(list_empty(&tl->requests)); + list_for_each_entry(target, &tl->requests, link) { + if (target->ring != ring) + continue; + + /* Would completion of this request free enough space? */ + if (bytes <= __intel_ring_space(target->postfix, + ring->emit, ring->size)) + break; + } + + if (GEM_WARN_ON(&target->link == &tl->requests)) + return -ENOSPC; + + timeout = i915_request_wait(target, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + if (timeout < 0) + return timeout; + + i915_request_retire_upto(target); + + intel_ring_update_space(ring); + GEM_BUG_ON(ring->space < bytes); + return 0; +} + +u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) +{ + struct intel_ring *ring = rq->ring; + const unsigned int remain_usable = ring->effective_size - ring->emit; + const unsigned int bytes = num_dwords * sizeof(u32); + unsigned int need_wrap = 0; + unsigned int total_bytes; + u32 *cs; + + /* Packets must be qword aligned. */ + GEM_BUG_ON(num_dwords & 1); + + total_bytes = bytes + rq->reserved_space; + GEM_BUG_ON(total_bytes > ring->effective_size); + + if (unlikely(total_bytes > remain_usable)) { + const int remain_actual = ring->size - ring->emit; + + if (bytes > remain_usable) { + /* + * Not enough space for the basic request. So need to + * flush out the remainder and then wait for + * base + reserved. + */ + total_bytes += remain_actual; + need_wrap = remain_actual | 1; + } else { + /* + * The base request will fit but the reserved space + * falls off the end. So we don't need an immediate + * wrap and only need to effectively wait for the + * reserved size from the start of ringbuffer. + */ + total_bytes = rq->reserved_space + remain_actual; + } + } + + if (unlikely(total_bytes > ring->space)) { + int ret; + + /* + * Space is reserved in the ringbuffer for finalising the + * request, as that cannot be allowed to fail. During request + * finalisation, reserved_space is set to 0 to stop the + * overallocation and the assumption is that then we never need + * to wait (which has the risk of failing with EINTR). + * + * See also i915_request_alloc() and i915_request_add(). 
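The space accounting behind intel_ring_begin() is modular arithmetic on a power-of-two ring that always keeps one cacheline spare; a restatement of the formula with a worked example:

	/* Free bytes between tail and head, modulo the ring size. */
	static unsigned int ring_space(unsigned int head, unsigned int tail,
				       unsigned int size)
	{
		/*
		 * e.g. size = 4096, head = 128, tail = 192:
		 * (128 - 192 - 64) & 4095 == 3968 bytes free.
		 */
		return (head - tail - CACHELINE_BYTES) & (size - 1);
	}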
+ */ + GEM_BUG_ON(!rq->reserved_space); + + ret = wait_for_space(ring, + i915_request_timeline(rq), + total_bytes); + if (unlikely(ret)) + return ERR_PTR(ret); + } + + if (unlikely(need_wrap)) { + need_wrap &= ~1; + GEM_BUG_ON(need_wrap > ring->space); + GEM_BUG_ON(ring->emit + need_wrap > ring->size); + GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); + + /* Fill the tail with MI_NOOP */ + memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); + ring->space -= need_wrap; + ring->emit = 0; + } + + GEM_BUG_ON(ring->emit > ring->size - bytes); + GEM_BUG_ON(ring->space < bytes); + cs = ring->vaddr + ring->emit; + GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); + ring->emit += bytes; + ring->space -= bytes; + + return cs; +} + +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct i915_request *rq) +{ + int num_dwords; + void *cs; + + num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); + if (num_dwords == 0) + return 0; + + num_dwords = CACHELINE_DWORDS - num_dwords; + GEM_BUG_ON(num_dwords & 1); + + cs = intel_ring_begin(rq, num_dwords); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); + intel_ring_advance(rq, cs + num_dwords); + + GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h new file mode 100644 index 000000000000..ea2839d9e044 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring.h @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RING_H +#define INTEL_RING_H + +#include "i915_gem.h" /* GEM_BUG_ON */ +#include "i915_request.h" +#include "intel_ring_types.h" + +struct intel_engine_cs; + +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size); + +u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords); +int intel_ring_cacheline_align(struct i915_request *rq); + +unsigned int intel_ring_update_space(struct intel_ring *ring); + +int intel_ring_pin(struct intel_ring *ring); +void intel_ring_unpin(struct intel_ring *ring); +void intel_ring_reset(struct intel_ring *ring, u32 tail); + +void intel_ring_free(struct kref *ref); + +static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) +{ + kref_get(&ring->ref); + return ring; +} + +static inline void intel_ring_put(struct intel_ring *ring) +{ + kref_put(&ring->ref, intel_ring_free); +} + +static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) +{ + /* Dummy function. + * + * This serves as a placeholder in the code so that the reader + * can compare against the preceding intel_ring_begin() and + * check that the number of dwords emitted matches the space + * reserved for the command packet (i.e. the value passed to + * intel_ring_begin()). + */ + GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); +} + +static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) +{ + return pos & (ring->size - 1); +} + +static inline bool +intel_ring_offset_valid(const struct intel_ring *ring, + unsigned int pos) +{ + if (pos & -ring->size) /* must be strictly within the ring */ + return false; + + if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */ + return false; + + return true; +} + +static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) +{ + /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. 
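The padding computed by intel_ring_cacheline_align() above can be checked by hand; just the arithmetic, assuming 64-byte cachelines (CACHELINE_DWORDS == 16):

	static unsigned int align_pad_dwords(unsigned int emit_bytes)
	{
		unsigned int partial = (emit_bytes & (CACHELINE_BYTES - 1)) /
				       sizeof(u32);

		/* e.g. emit ends 8 bytes into a line: partial = 2, pad = 14 */
		return partial ? CACHELINE_DWORDS - partial : 0;
	}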
*/ + u32 offset = addr - rq->ring->vaddr; + GEM_BUG_ON(offset > rq->ring->size); + return intel_ring_wrap(rq->ring, offset); +} + +static inline void +assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) +{ + GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); + + /* + * "Ring Buffer Use" + * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 + * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5 + * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5 + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." + * + * We use ring->head as the last known location of the actual RING_HEAD, + * it may have advanced but in the worst case it is equally the same + * as ring->head and so we should never program RING_TAIL to advance + * into the same cacheline as ring->head. + */ +#define cacheline(a) round_down(a, CACHELINE_BYTES) + GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) && + tail < ring->head); +#undef cacheline +} + +static inline unsigned int +intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) +{ + /* Whilst writes to the tail are strictly ordered, there is no + * serialisation between readers and the writers. The tail may be + * read by i915_request_retire() just as it is being updated + * by execlists, as although the breadcrumb is complete, the context + * switch hasn't been seen. + */ + assert_ring_tail_valid(ring, tail); + ring->tail = tail; + return tail; +} + +static inline unsigned int +__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) +{ + /* + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." + */ + GEM_BUG_ON(!is_power_of_2(size)); + return (head - tail - CACHELINE_BYTES) & (size - 1); +} + +#endif /* INTEL_RING_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index bf631f15aa78..81f872f9ef03 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -40,6 +40,7 @@ #include "intel_gt_irq.h" #include "intel_gt_pm_irq.h" #include "intel_reset.h" +#include "intel_ring.h" #include "intel_workarounds.h" /* Rough estimate of the typical request size, performing a flush, @@ -47,16 +48,6 @@ */ #define LEGACY_REQUEST_SIZE 200 -unsigned int intel_ring_update_space(struct intel_ring *ring) -{ - unsigned int space; - - space = __intel_ring_space(ring->head, ring->emit, ring->size); - - ring->space = space; - return space; -} - static int gen2_render_ring_flush(struct i915_request *rq, u32 mode) { @@ -371,6 +362,12 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode) */ flags |= PIPE_CONTROL_CS_STALL; + /* + * CS_STALL suggests at least a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + /* Just flush everything. Experiments have shown that reducing the * number of bits based on the write domains has little performance * impact. @@ -389,13 +386,6 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode) flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; - /* - * TLB invalidate requires a post-sync write.
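The Bspec head/tail rule quoted in assert_ring_tail_valid() above reduces to a small predicate; as a standalone sketch:

	static bool tail_is_safe(unsigned int head, unsigned int tail)
	{
		/* A new TAIL must not trail HEAD within HEAD's cacheline. */
		return round_down(tail, CACHELINE_BYTES) !=
		       round_down(head, CACHELINE_BYTES) ||
		       tail >= head;
	}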
- */ - flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; - - flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; /* Workaround: we must issue a pipe_control with CS-stall bit * set before a pipe_control command that has the state cache @@ -463,7 +453,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); - *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB | + MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = rq->fence.seqno; @@ -505,14 +496,13 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask) static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) { - struct drm_i915_private *dev_priv = engine->i915; u32 addr; addr = lower_32_bits(phys); - if (INTEL_GEN(dev_priv) >= 4) + if (INTEL_GEN(engine->i915) >= 4) addr |= (phys >> 28) & 0xf0; - I915_WRITE(HWS_PGA, addr); + intel_uncore_write(engine->uncore, HWS_PGA, addr); } static struct page *status_page(struct intel_engine_cs *engine) @@ -531,14 +521,13 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine) static void set_hwsp(struct intel_engine_cs *engine, u32 offset) { - struct drm_i915_private *dev_priv = engine->i915; i915_reg_t hwsp; /* * The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. */ - if (IS_GEN(dev_priv, 7)) { + if (IS_GEN(engine->i915, 7)) { switch (engine->id) { /* * No more rings exist on Gen7. Default case is only to shut up @@ -560,14 +549,14 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) hwsp = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN(dev_priv, 6)) { + } else if (IS_GEN(engine->i915, 6)) { hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); } else { hwsp = RING_HWS_PGA(engine->mmio_base); } - I915_WRITE(hwsp, offset); - POSTING_READ(hwsp); + intel_uncore_write(engine->uncore, hwsp, offset); + intel_uncore_posting_read(engine->uncore, hwsp); } static void flush_cs_tlb(struct intel_engine_cs *engine) @@ -642,8 +631,8 @@ static int xcs_resume(struct intel_engine_cs *engine) struct intel_ring *ring = engine->legacy.ring; int ret = 0; - GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n", - engine->name, ring->head, ring->tail); + ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", + ring->head, ring->tail); intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); @@ -730,7 +719,7 @@ static int xcs_resume(struct intel_engine_cs *engine) } /* Papering over lost _interrupts_ immediately following the restart */ - intel_engine_queue_breadcrumbs(engine); + intel_engine_signal_breadcrumbs(engine); out: intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); @@ -756,10 +745,10 @@ static void reset_prepare(struct intel_engine_cs *engine) * * FIXME: Wa for more modern gens needs to be validated */ - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); if (intel_engine_stop_cs(engine)) - GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); + ENGINE_TRACE(engine, "timed out on STOP_RING\n"); intel_uncore_write_fw(uncore, RING_HEAD(base), @@ -775,12 +764,11 @@ static void reset_prepare(struct intel_engine_cs *engine) /* Check acts as a post */ if (intel_uncore_read_fw(uncore, RING_HEAD(base))) - GEM_TRACE("%s: ring head [%x] not parked\n", - engine->name, - 
intel_uncore_read_fw(uncore, RING_HEAD(base))); + ENGINE_TRACE(engine, "ring head [%x] not parked\n", + intel_uncore_read_fw(uncore, RING_HEAD(base))); } -static void reset_ring(struct intel_engine_cs *engine, bool stalled) +static void reset_rewind(struct intel_engine_cs *engine, bool stalled) { struct i915_request *pos, *rq; unsigned long flags; @@ -851,7 +839,8 @@ static void reset_finish(struct intel_engine_cs *engine) static int rcs_resume(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; /* * Disable CONSTANT_BUFFER before it is loaded from the context @@ -863,13 +852,14 @@ static int rcs_resume(struct intel_engine_cs *engine) * they are already accustomed to from before contexts were * enabled. */ - if (IS_GEN(dev_priv, 4)) - I915_WRITE(ECOSKPD, + if (IS_GEN(i915, 4)) + intel_uncore_write(uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE)); /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (IS_GEN_RANGE(dev_priv, 4, 6)) - I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); + if (IS_GEN_RANGE(i915, 4, 6)) + intel_uncore_write(uncore, MI_MODE, + _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order * to use MI_WAIT_FOR_EVENT within the CS. It should already be @@ -877,38 +867,40 @@ static int rcs_resume(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (IS_GEN_RANGE(dev_priv, 6, 7)) - I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); + if (IS_GEN_RANGE(i915, 6, 7)) + intel_uncore_write(uncore, MI_MODE, + _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (IS_GEN(dev_priv, 6)) - I915_WRITE(GFX_MODE, + if (IS_GEN(i915, 6)) + intel_uncore_write(uncore, GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN(dev_priv, 7)) - I915_WRITE(GFX_MODE_GEN7, + if (IS_GEN(i915, 7)) + intel_uncore_write(uncore, GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN(dev_priv, 6)) { + if (IS_GEN(i915, 6)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * policy. [...] This bit must be reset. LRA replacement * policy is not supported." */ - I915_WRITE(CACHE_MODE_0, + intel_uncore_write(uncore, CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (IS_GEN_RANGE(dev_priv, 6, 7)) - I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); + if (IS_GEN_RANGE(i915, 6, 7)) + intel_uncore_write(uncore, INSTPM, + _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); return xcs_resume(engine); } -static void cancel_requests(struct intel_engine_cs *engine) +static void reset_cancel(struct intel_engine_cs *engine) { struct i915_request *request; unsigned long flags; @@ -1186,162 +1178,6 @@ i915_emit_bb_start(struct i915_request *rq, return 0; } -int intel_ring_pin(struct intel_ring *ring) -{ - struct i915_vma *vma = ring->vma; - unsigned int flags; - void *addr; - int ret; - - if (atomic_fetch_inc(&ring->pin_count)) - return 0; - - flags = PIN_GLOBAL; - - /* Ring wraparound at offset 0 sometimes hangs. No idea why. 
*/ - flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - - if (vma->obj->stolen) - flags |= PIN_MAPPABLE; - else - flags |= PIN_HIGH; - - ret = i915_vma_pin(vma, 0, 0, flags); - if (unlikely(ret)) - goto err_unpin; - - if (i915_vma_is_map_and_fenceable(vma)) - addr = (void __force *)i915_vma_pin_iomap(vma); - else - addr = i915_gem_object_pin_map(vma->obj, - i915_coherent_map_type(vma->vm->i915)); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto err_ring; - } - - i915_vma_make_unshrinkable(vma); - - GEM_BUG_ON(ring->vaddr); - ring->vaddr = addr; - - return 0; - -err_ring: - i915_vma_unpin(vma); -err_unpin: - atomic_dec(&ring->pin_count); - return ret; -} - -void intel_ring_reset(struct intel_ring *ring, u32 tail) -{ - tail = intel_ring_wrap(ring, tail); - ring->tail = tail; - ring->head = tail; - ring->emit = tail; - intel_ring_update_space(ring); -} - -void intel_ring_unpin(struct intel_ring *ring) -{ - struct i915_vma *vma = ring->vma; - - if (!atomic_dec_and_test(&ring->pin_count)) - return; - - /* Discard any unused bytes beyond that submitted to hw. */ - intel_ring_reset(ring, ring->emit); - - i915_vma_unset_ggtt_write(vma); - if (i915_vma_is_map_and_fenceable(vma)) - i915_vma_unpin_iomap(vma); - else - i915_gem_object_unpin_map(vma->obj); - - GEM_BUG_ON(!ring->vaddr); - ring->vaddr = NULL; - - i915_vma_unpin(vma); - i915_vma_make_purgeable(vma); -} - -static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) -{ - struct i915_address_space *vm = &ggtt->vm; - struct drm_i915_private *i915 = vm->i915; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - - obj = i915_gem_object_create_stolen(i915, size); - if (IS_ERR(obj)) - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - /* - * Mark ring buffers as read-only from GPU side (so no stray overwrites) - * if supported by the platform's GGTT. - */ - if (vm->has_read_only) - i915_gem_object_set_readonly(obj); - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) - goto err; - - return vma; - -err: - i915_gem_object_put(obj); - return vma; -} - -struct intel_ring * -intel_engine_create_ring(struct intel_engine_cs *engine, int size) -{ - struct drm_i915_private *i915 = engine->i915; - struct intel_ring *ring; - struct i915_vma *vma; - - GEM_BUG_ON(!is_power_of_2(size)); - GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); - - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - return ERR_PTR(-ENOMEM); - - kref_init(&ring->ref); - - ring->size = size; - /* Workaround an erratum on the i830 which causes a hang if - * the TAIL pointer points to within the last 2 cachelines - * of the buffer. 
- */ - ring->effective_size = size; - if (IS_I830(i915) || IS_I845G(i915)) - ring->effective_size -= 2 * CACHELINE_BYTES; - - intel_ring_update_space(ring); - - vma = create_ring_vma(engine->gt->ggtt, size); - if (IS_ERR(vma)) { - kfree(ring); - return ERR_CAST(vma); - } - ring->vma = vma; - - return ring; -} - -void intel_ring_free(struct kref *ref) -{ - struct intel_ring *ring = container_of(ref, typeof(*ring), ref); - - i915_vma_put(ring->vma); - kfree(ring); -} - static void __ring_context_fini(struct intel_context *ce) { i915_vma_put(ce->state); @@ -1483,6 +1319,8 @@ static int ring_context_alloc(struct intel_context *ce) return PTR_ERR(vma); ce->state = vma; + if (engine->default_state) + __set_bit(CONTEXT_VALID_BIT, &ce->flags); } return 0; @@ -1525,45 +1363,37 @@ static const struct intel_context_ops ring_context_ops = { .destroy = ring_context_destroy, }; -static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt) +static int load_pd_dir(struct i915_request *rq, + const struct i915_ppgtt *ppgtt, + u32 valid) { const struct intel_engine_cs * const engine = rq->engine; u32 *cs; - cs = intel_ring_begin(rq, 6); + cs = intel_ring_begin(rq, 12); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); - *cs++ = PP_DIR_DCLV_2G; + *cs++ = valid; *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; - intel_ring_advance(rq, cs); - - return 0; -} - -static int flush_pd_dir(struct i915_request *rq) -{ - const struct intel_engine_cs * const engine = rq->engine; - u32 *cs; - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* Stall until the page table load is complete */ + /* Stall until the page table load is complete? */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = intel_gt_scratch_offset(rq->engine->gt, + *cs++ = intel_gt_scratch_offset(engine->gt, INTEL_GT_SCRATCH_FIELD_DEFAULT); - *cs++ = MI_NOOP; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base)); + *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE); intel_ring_advance(rq, cs); + return 0; } @@ -1650,7 +1480,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) *cs++ = MI_NOOP; *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; + *cs++ = i915_ggtt_offset(rq->context->state) | flags; /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv @@ -1720,10 +1550,10 @@ static int remap_l3_slice(struct i915_request *rq, int slice) static int remap_l3(struct i915_request *rq) { - struct i915_gem_context *ctx = rq->gem_context; + struct i915_gem_context *ctx = i915_request_gem_context(rq); int i, err; - if (!ctx->remap_slice) + if (!ctx || !ctx->remap_slice) return 0; for (i = 0; i < MAX_L3_SLICES; i++) { @@ -1739,34 +1569,50 @@ static int remap_l3(struct i915_request *rq) return 0; } +static int switch_mm(struct i915_request *rq, struct i915_address_space *vm) +{ + int ret; + + if (!vm) + return 0; + + ret = rq->engine->emit_flush(rq, EMIT_FLUSH); + if (ret) + return ret; + + /* + * Not only do we need a full barrier (post-sync write) after + * invalidating the TLBs, but we need to wait a little bit + * longer. 
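Stepping back to the reworked load_pd_dir() above: it now batches everything into a single 12-dword emission, two LRIs loading DCLV and PP_DIR_BASE, an SRM read-back of PP_DIR_BASE into scratch as a stall, then a final LRI poking INSTPM to invalidate the TLBs (3 + 3 + 3 + 3 dwords, which is where the intel_ring_begin(rq, 12) comes from). The recurring building block is the immediate register write; roughly:

	/* hypothetical helper: one MI_LOAD_REGISTER_IMM writing <value> into
	 * the register at mmio offset <reg> */
	static u32 *sketch_emit_lri(u32 *cs, u32 reg, u32 value)
	{
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = reg;		/* from i915_mmio_reg_offset() */
		*cs++ = value;
		return cs;
	}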
Whether this is merely delaying us, or the + * subsequent flush is a key part of serialising with the + * post-sync op, this extra pass appears vital before a + * mm switch! + */ + ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G); + if (ret) + return ret; + + return rq->engine->emit_flush(rq, EMIT_FLUSH); +} + static int switch_context(struct i915_request *rq) { - struct intel_context *ce = rq->hw_context; - struct i915_address_space *vm = vm_alias(ce); + struct intel_context *ce = rq->context; int ret; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); - if (vm) { - ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm)); - if (ret) - return ret; - } + ret = switch_mm(rq, vm_alias(ce)); + if (ret) + return ret; if (ce->state) { u32 hw_flags; GEM_BUG_ON(rq->engine->id != RCS0); - /* - * The kernel context(s) is treated as pure scratch and is not - * expected to retain any state (as we sacrifice it during - * suspend and on resume it may be corrupted). This is ok, - * as nothing actually executes using the kernel context; it - * is purely used for flushing user contexts. - */ hw_flags = 0; - if (i915_gem_context_is_kernel(rq->gem_context)) + if (!test_bit(CONTEXT_VALID_BIT, &ce->flags)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); @@ -1774,34 +1620,6 @@ static int switch_context(struct i915_request *rq) return ret; } - if (vm) { - struct intel_engine_cs *engine = rq->engine; - - ret = engine->emit_flush(rq, EMIT_INVALIDATE); - if (ret) - return ret; - - ret = flush_pd_dir(rq); - if (ret) - return ret; - - /* - * Not only do we need a full barrier (post-sync write) after - * invalidating the TLBs, but we need to wait a little bit - * longer. Whether this is merely delaying us, or the - * subsequent flush is a key part of serialising with the - * post-sync op, this extra pass appears vital before a - * mm switch! - */ - ret = engine->emit_flush(rq, EMIT_INVALIDATE); - if (ret) - return ret; - - ret = engine->emit_flush(rq, EMIT_FLUSH); - if (ret) - return ret; - } - ret = remap_l3(rq); if (ret) return ret; @@ -1813,7 +1631,7 @@ static int ring_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); + GEM_BUG_ON(!intel_context_is_pinned(request->context)); GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb); /* @@ -1836,148 +1654,6 @@ static int ring_request_alloc(struct i915_request *request) return 0; } -static noinline int -wait_for_space(struct intel_ring *ring, - struct intel_timeline *tl, - unsigned int bytes) -{ - struct i915_request *target; - long timeout; - - if (intel_ring_update_space(ring) >= bytes) - return 0; - - GEM_BUG_ON(list_empty(&tl->requests)); - list_for_each_entry(target, &tl->requests, link) { - if (target->ring != ring) - continue; - - /* Would completion of this request free enough space? 
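A semantic change above is worth spelling out: switch_context() no longer special-cases the kernel context when deciding whether to inhibit the restore. It now keys MI_RESTORE_INHIBIT off CONTEXT_VALID_BIT, which ring_context_alloc() (earlier in this patch) sets once engine->default_state has seeded the context image, so the first switch into a never-populated context skips restoring its uninitialised image. In outline:

	/* sketch: never restore a context image that was never written */
	u32 hw_flags = 0;

	if (!test_bit(CONTEXT_VALID_BIT, &ce->flags))
		hw_flags = MI_RESTORE_INHIBIT;	/* skip restoring the fresh image */

	ret = mi_set_context(rq, hw_flags);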
*/ - if (bytes <= __intel_ring_space(target->postfix, - ring->emit, ring->size)) - break; - } - - if (GEM_WARN_ON(&target->link == &tl->requests)) - return -ENOSPC; - - timeout = i915_request_wait(target, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (timeout < 0) - return timeout; - - i915_request_retire_upto(target); - - intel_ring_update_space(ring); - GEM_BUG_ON(ring->space < bytes); - return 0; -} - -u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) -{ - struct intel_ring *ring = rq->ring; - const unsigned int remain_usable = ring->effective_size - ring->emit; - const unsigned int bytes = num_dwords * sizeof(u32); - unsigned int need_wrap = 0; - unsigned int total_bytes; - u32 *cs; - - /* Packets must be qword aligned. */ - GEM_BUG_ON(num_dwords & 1); - - total_bytes = bytes + rq->reserved_space; - GEM_BUG_ON(total_bytes > ring->effective_size); - - if (unlikely(total_bytes > remain_usable)) { - const int remain_actual = ring->size - ring->emit; - - if (bytes > remain_usable) { - /* - * Not enough space for the basic request. So need to - * flush out the remainder and then wait for - * base + reserved. - */ - total_bytes += remain_actual; - need_wrap = remain_actual | 1; - } else { - /* - * The base request will fit but the reserved space - * falls off the end. So we don't need an immediate - * wrap and only need to effectively wait for the - * reserved size from the start of ringbuffer. - */ - total_bytes = rq->reserved_space + remain_actual; - } - } - - if (unlikely(total_bytes > ring->space)) { - int ret; - - /* - * Space is reserved in the ringbuffer for finalising the - * request, as that cannot be allowed to fail. During request - * finalisation, reserved_space is set to 0 to stop the - * overallocation and the assumption is that then we never need - * to wait (which has the risk of failing with EINTR). - * - * See also i915_request_alloc() and i915_request_add(). 
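One detail of the intel_ring_begin() logic being deleted above (the ring helpers are being split out of this file, not changed): need_wrap packs a flag into bit 0. remain_actual is always even, since packets are qword-aligned, so `remain_actual | 1` simultaneously records how many padding bytes to emit and stays non-zero even when remain_actual is 0; the flag is stripped with `&= ~1` before the memset64() MI_NOOP fill. In isolation:

	/* sketch of the bit-0 trick; byte counts here are always even */
	static unsigned int sketch_pack_wrap(unsigned int remain_actual)
	{
		return remain_actual | 1;	/* non-zero even for a 0-byte fill */
	}

	static unsigned int sketch_unpack_wrap(unsigned int need_wrap)
	{
		return need_wrap & ~1u;		/* bytes of MI_NOOP padding */
	}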
- */ - GEM_BUG_ON(!rq->reserved_space); - - ret = wait_for_space(ring, - i915_request_timeline(rq), - total_bytes); - if (unlikely(ret)) - return ERR_PTR(ret); - } - - if (unlikely(need_wrap)) { - need_wrap &= ~1; - GEM_BUG_ON(need_wrap > ring->space); - GEM_BUG_ON(ring->emit + need_wrap > ring->size); - GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); - - /* Fill the tail with MI_NOOP */ - memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); - ring->space -= need_wrap; - ring->emit = 0; - } - - GEM_BUG_ON(ring->emit > ring->size - bytes); - GEM_BUG_ON(ring->space < bytes); - cs = ring->vaddr + ring->emit; - GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); - ring->emit += bytes; - ring->space -= bytes; - - return cs; -} - -/* Align the ring tail to a cacheline boundary */ -int intel_ring_cacheline_align(struct i915_request *rq) -{ - int num_dwords; - void *cs; - - num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); - if (num_dwords == 0) - return 0; - - num_dwords = CACHELINE_DWORDS - num_dwords; - GEM_BUG_ON(num_dwords & 1); - - cs = intel_ring_begin(rq, num_dwords); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); - intel_ring_advance(rq, cs); - - GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); - return 0; -} - static void gen6_bsd_submit_request(struct i915_request *request) { struct intel_uncore *uncore = request->engine->uncore; @@ -2111,7 +1787,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode) static void i9xx_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = i9xx_submit_request; - engine->cancel_requests = cancel_requests; engine->park = NULL; engine->unpark = NULL; @@ -2123,7 +1798,7 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) engine->submit_request = gen6_bsd_submit_request; } -static void ring_destroy(struct intel_engine_cs *engine) +static void ring_release(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -2137,8 +1812,6 @@ static void ring_destroy(struct intel_engine_cs *engine) intel_timeline_unpin(engine->legacy.timeline); intel_timeline_put(engine->legacy.timeline); - - kfree(engine); } static void setup_irq(struct intel_engine_cs *engine) @@ -2169,11 +1842,12 @@ static void setup_common(struct intel_engine_cs *engine) setup_irq(engine); - engine->destroy = ring_destroy; + engine->release = ring_release; engine->resume = xcs_resume; engine->reset.prepare = reset_prepare; - engine->reset.reset = reset_ring; + engine->reset.rewind = reset_rewind; + engine->reset.cancel = reset_cancel; engine->reset.finish = reset_finish; engine->cops = &ring_context_ops; @@ -2284,6 +1958,10 @@ static void setup_vecs(struct intel_engine_cs *engine) int intel_ring_submission_setup(struct intel_engine_cs *engine) { + struct intel_timeline *timeline; + struct intel_ring *ring; + int err; + setup_common(engine); switch (engine->class) { @@ -2304,15 +1982,6 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) return -ENODEV; } - return 0; -} - -int intel_ring_submission_init(struct intel_engine_cs *engine) -{ - struct intel_timeline *timeline; - struct intel_ring *ring; - int err; - timeline = intel_timeline_create(engine->gt, engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); @@ -2338,16 +2007,10 @@ int intel_ring_submission_init(struct intel_engine_cs *engine) engine->legacy.ring = ring; engine->legacy.timeline = timeline; - err = 
intel_engine_init_common(engine); - if (err) - goto err_ring_unpin; - GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); return 0; -err_ring_unpin: - intel_ring_unpin(ring); err_ring: intel_ring_put(ring); err_timeline_unpin: diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h new file mode 100644 index 000000000000..d9f17f38e0cc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RING_TYPES_H +#define INTEL_RING_TYPES_H + +#include <linux/atomic.h> +#include <linux/kref.h> +#include <linux/types.h> + +/* + * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, + * but keeps the logic simple. Indeed, the whole purpose of this macro is just + * to give some inclination as to some of the magic values used in the various + * workarounds! + */ +#define CACHELINE_BYTES 64 +#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) + +struct i915_vma; + +struct intel_ring { + struct kref ref; + struct i915_vma *vma; + void *vaddr; + + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. + */ + atomic_t pin_count; + + u32 head; + u32 tail; + u32 emit; + + u32 space; + u32 size; + u32 effective_size; +}; + +#endif /* INTEL_RING_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c new file mode 100644 index 000000000000..f232036c3c7a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -0,0 +1,1903 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_irq.h" +#include "intel_gt_pm_irq.h" +#include "intel_rps.h" +#include "intel_sideband.h" +#include "../../../platform/x86/intel_ips.h" + +/* + * Lock protecting IPS related data structures + */ +static DEFINE_SPINLOCK(mchdev_lock); + +static struct intel_gt *rps_to_gt(struct intel_rps *rps) +{ + return container_of(rps, struct intel_gt, rps); +} + +static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) +{ + return rps_to_gt(rps)->i915; +} + +static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) +{ + return rps_to_gt(rps)->uncore; +} + +static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) +{ + return mask & ~rps->pm_intrmsk_mbz; +} + +static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) +{ + intel_uncore_write_fw(uncore, reg, val); +} + +static u32 rps_pm_mask(struct intel_rps *rps, u8 val) +{ + u32 mask = 0; + + /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ + if (val > rps->min_freq_softlimit) + mask |= (GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + + if (val < rps->max_freq_softlimit) + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; + + mask &= rps->pm_events; + + return rps_pm_sanitize_mask(rps, ~mask); +} + +static void rps_reset_ei(struct intel_rps *rps) +{ + memset(&rps->ei, 0, sizeof(rps->ei)); +} + +static void 
rps_enable_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + rps_reset_ei(rps); + + if (IS_VALLEYVIEW(gt->i915)) + /* WaGsvRC0ResidencyMethod:vlv */ + rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_enable_irq(gt, rps->pm_events); + spin_unlock_irq(>->irq_lock); + + set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq)); +} + +static void gen6_rps_reset_interrupts(struct intel_rps *rps) +{ + gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); +} + +static void gen11_rps_reset_interrupts(struct intel_rps *rps) +{ + while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) + ; +} + +static void rps_reset_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + spin_lock_irq(>->irq_lock); + if (INTEL_GEN(gt->i915) >= 11) + gen11_rps_reset_interrupts(rps); + else + gen6_rps_reset_interrupts(rps); + + rps->pm_iir = 0; + spin_unlock_irq(>->irq_lock); +} + +static void rps_disable_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + rps->pm_events = 0; + + set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); + spin_unlock_irq(>->irq_lock); + + intel_synchronize_irq(gt->i915); + + /* + * Now that we will not be generating any more work, flush any + * outstanding tasks. As we are called on the RPS idle path, + * we will reset the GPU to minimum frequencies, so the current + * state of the worker can be discarded. + */ + cancel_work_sync(&rps->work); + + rps_reset_interrupts(rps); +} + +static const struct cparams { + u16 i; + u16 t; + u16 m; + u16 c; +} cparams[] = { + { 1, 1333, 301, 28664 }, + { 1, 1066, 294, 24460 }, + { 1, 800, 294, 25192 }, + { 0, 1333, 276, 27605 }, + { 0, 1066, 276, 27605 }, + { 0, 800, 231, 23784 }, +}; + +static void gen5_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + u8 fmax, fmin, fstart; + u32 rgvmodectl; + int c_m, i; + + if (i915->fsb_freq <= 3200) + c_m = 0; + else if (i915->fsb_freq <= 4800) + c_m = 1; + else + c_m = 2; + + for (i = 0; i < ARRAY_SIZE(cparams); i++) { + if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { + rps->ips.m = cparams[i].m; + rps->ips.c = cparams[i].c; + break; + } + } + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); + + rps->min_freq = fmax; + rps->max_freq = fmin; + + rps->idle_freq = rps->min_freq; + rps->cur_freq = rps->idle_freq; +} + +static unsigned long +__ips_chipset_val(struct intel_ips *ips) +{ + struct intel_uncore *uncore = + rps_to_uncore(container_of(ips, struct intel_rps, ips)); + unsigned long now = jiffies_to_msecs(jiffies), dt; + unsigned long result; + u64 total, delta; + + lockdep_assert_held(&mchdev_lock); + + /* + * Prevent division-by-zero if we are asking too fast. + * Also, we don't get interesting results if we are polling + * faster than once in 10ms, so just return the saved value + * in such cases. 
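The arithmetic that follows implements a simple affine energy model: the three energy counters (DMIEC + DDREC + CSIEC) are summed, their rate is taken over the elapsed milliseconds, and chipset power comes out as m times that rate plus c, scaled down by ten, with m and c picked from the cparams table above by FSB speed and memory frequency. As a standalone sketch:

	static unsigned long sketch_chipset_power(u64 delta_count, u32 dt_ms,
						  u64 m, u64 c)
	{
		/* e.g. m=301, c=28664, delta=10000, dt=100ms:
		 * (301 * 10000 / 100 + 28664) / 10 = 5876 */
		return div_u64(div_u64(m * delta_count, dt_ms) + c, 10);
	}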
+ */ + dt = now - ips->last_time1; + if (dt <= 10) + return ips->chipset_power; + + /* FIXME: handle per-counter overflow */ + total = intel_uncore_read(uncore, DMIEC); + total += intel_uncore_read(uncore, DDREC); + total += intel_uncore_read(uncore, CSIEC); + + delta = total - ips->last_count1; + + result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); + + ips->last_count1 = total; + ips->last_time1 = now; + + ips->chipset_power = result; + + return result; +} + +static unsigned long ips_mch_val(struct intel_uncore *uncore) +{ + unsigned int m, x, b; + u32 tsfs; + + tsfs = intel_uncore_read(uncore, TSFS); + x = intel_uncore_read8(uncore, TR1); + + b = tsfs & TSFS_INTR_MASK; + m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; + + return m * x / 127 - b; +} + +static int _pxvid_to_vd(u8 pxvid) +{ + if (pxvid == 0) + return 0; + + if (pxvid >= 8 && pxvid < 31) + pxvid = 31; + + return (pxvid + 2) * 125; +} + +static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) +{ + const int vd = _pxvid_to_vd(pxvid); + + if (INTEL_INFO(i915)->is_mobile) + return max(vd - 1125, 0); + + return vd; +} + +static void __gen5_ips_update(struct intel_ips *ips) +{ + struct intel_uncore *uncore = + rps_to_uncore(container_of(ips, struct intel_rps, ips)); + u64 now, delta, dt; + u32 count; + + lockdep_assert_held(&mchdev_lock); + + now = ktime_get_raw_ns(); + dt = now - ips->last_time2; + do_div(dt, NSEC_PER_MSEC); + + /* Don't divide by 0 */ + if (dt <= 10) + return; + + count = intel_uncore_read(uncore, GFXEC); + delta = count - ips->last_count2; + + ips->last_count2 = count; + ips->last_time2 = now; + + /* More magic constants... */ + ips->gfx_power = div_u64(delta * 1181, dt * 10); +} + +static void gen5_rps_update(struct intel_rps *rps) +{ + spin_lock_irq(&mchdev_lock); + __gen5_ips_update(&rps->ips); + spin_unlock_irq(&mchdev_lock); +} + +static bool gen5_rps_set(struct intel_rps *rps, u8 val) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u16 rgvswctl; + + lockdep_assert_held(&mchdev_lock); + + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + /* Invert the frequency bin into an ips delay */ + val = rps->max_freq - val; + val = rps->min_freq + val; + + rgvswctl = + (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | + MEMCTL_SFCAVM; + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + intel_uncore_posting_read16(uncore, MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + + return true; +} + +static unsigned long intel_pxfreq(u32 vidfreq) +{ + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + return div * 133333 / (pre << post); +} + +static unsigned int init_emon(struct intel_uncore *uncore) +{ + u8 pxw[16]; + int i; + + /* Disable to program */ + intel_uncore_write(uncore, ECR, 0); + intel_uncore_posting_read(uncore, ECR); + + /* Program energy weights for various events */ + intel_uncore_write(uncore, SDEW, 0x15040d00); + intel_uncore_write(uncore, CSIEW0, 0x007f0000); + intel_uncore_write(uncore, CSIEW1, 0x1e220004); + intel_uncore_write(uncore, CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + intel_uncore_write(uncore, PEW(i), 0); + for (i = 0; i < 3; i++) + intel_uncore_write(uncore, DEW(i), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 
0; i < 16; i++) { + u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); + unsigned int freq = intel_pxfreq(pxvidfreq); + unsigned int vid = + (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; + unsigned int val; + + val = vid * vid * freq / 1000 * 255; + val /= 127 * 127 * 900; + + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + intel_uncore_write(uncore, PXW(i), + pxw[i * 4 + 0] << 24 | + pxw[i * 4 + 1] << 16 | + pxw[i * 4 + 2] << 8 | + pxw[i * 4 + 3] << 0); + } + + /* Adjust magic regs to magic values (more experimental results) */ + intel_uncore_write(uncore, OGW0, 0); + intel_uncore_write(uncore, OGW1, 0); + intel_uncore_write(uncore, EG0, 0x00007f00); + intel_uncore_write(uncore, EG1, 0x0000000e); + intel_uncore_write(uncore, EG2, 0x000e0000); + intel_uncore_write(uncore, EG3, 0x68000300); + intel_uncore_write(uncore, EG4, 0x42000000); + intel_uncore_write(uncore, EG5, 0x00140031); + intel_uncore_write(uncore, EG6, 0); + intel_uncore_write(uncore, EG7, 0); + + for (i = 0; i < 8; i++) + intel_uncore_write(uncore, PXWL(i), 0); + + /* Enable PMON + select events */ + intel_uncore_write(uncore, ECR, 0x80000019); + + return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; +} + +static bool gen5_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u8 fstart, vstart; + u32 rgvmodectl; + + spin_lock_irq(&mchdev_lock); + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + + /* Enable temp reporting */ + intel_uncore_write16(uncore, PMMISC, + intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); + intel_uncore_write16(uncore, TSC1, + intel_uncore_read16(uncore, TSC1) | TSE); + + /* 100ms RC evaluation intervals */ + intel_uncore_write(uncore, RCUPEI, 100000); + intel_uncore_write(uncore, RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + intel_uncore_write(uncore, RCBMAXAVG, 90000); + intel_uncore_write(uncore, RCBMINAVG, 80000); + + intel_uncore_write(uncore, MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + + vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & + PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; + + intel_uncore_write(uncore, + MEMINTREN, + MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + intel_uncore_write(uncore, VIDSTART, vstart); + intel_uncore_posting_read(uncore, VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); + + if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & + MEMCTL_CMD_STS) == 0, 10)) + DRM_ERROR("stuck trying to change perf mode\n"); + mdelay(1); + + gen5_rps_set(rps, rps->cur_freq); + + rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); + rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); + rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); + rps->ips.last_time1 = jiffies_to_msecs(jiffies); + + rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); + rps->ips.last_time2 = ktime_get_raw_ns(); + + spin_unlock_irq(&mchdev_lock); + + rps->ips.corr = init_emon(uncore); + + return true; +} + +static void gen5_rps_disable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u16 rgvswctl; + + spin_lock_irq(&mchdev_lock); + + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + intel_uncore_write(uncore, MEMINTREN, + intel_uncore_read(uncore, MEMINTREN) & + ~MEMINT_EVAL_CHG_EN); + 
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + intel_uncore_write(uncore, DEIER, + intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); + intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); + intel_uncore_write(uncore, DEIMR, + intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + gen5_rps_set(rps, rps->idle_freq); + mdelay(1); + rgvswctl |= MEMCTL_CMD_STS; + intel_uncore_write(uncore, MEMSWCTL, rgvswctl); + mdelay(1); + + spin_unlock_irq(&mchdev_lock); +} + +static u32 rps_limits(struct intel_rps *rps, u8 val) +{ + u32 limits; + + /* + * Only set the down limit when we've reached the lowest level to avoid + * getting more interrupts, otherwise leave this clear. This prevents a + * race in the hw when coming out of rc6: There's a tiny window where + * the hw runs at the minimal clock before selecting the desired + * frequency, if the down threshold expires in that window we will not + * receive a down interrupt. + */ + if (INTEL_GEN(rps_to_i915(rps)) >= 9) { + limits = rps->max_freq_softlimit << 23; + if (val <= rps->min_freq_softlimit) + limits |= rps->min_freq_softlimit << 14; + } else { + limits = rps->max_freq_softlimit << 24; + if (val <= rps->min_freq_softlimit) + limits |= rps->min_freq_softlimit << 16; + } + + return limits; +} + +static void rps_set_power(struct intel_rps *rps, int new_power) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 threshold_up = 0, threshold_down = 0; /* in % */ + u32 ei_up = 0, ei_down = 0; + + lockdep_assert_held(&rps->power.mutex); + + if (new_power == rps->power.mode) + return; + + /* Note the units here are not exactly 1us, but 1280ns. */ + switch (new_power) { + case LOW_POWER: + /* Upclock if more than 95% busy over 16ms */ + ei_up = 16000; + threshold_up = 95; + + /* Downclock if less than 85% busy over 32ms */ + ei_down = 32000; + threshold_down = 85; + break; + + case BETWEEN: + /* Upclock if more than 90% busy over 13ms */ + ei_up = 13000; + threshold_up = 90; + + /* Downclock if less than 75% busy over 32ms */ + ei_down = 32000; + threshold_down = 75; + break; + + case HIGH_POWER: + /* Upclock if more than 85% busy over 10ms */ + ei_up = 10000; + threshold_up = 85; + + /* Downclock if less than 60% busy over 32ms */ + ei_down = 32000; + threshold_down = 60; + break; + } + + /* When byt can survive without system hang with dynamic + * sw freq adjustments, this restriction can be lifted. + */ + if (IS_VALLEYVIEW(i915)) + goto skip_hw_write; + + set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up)); + set(uncore, GEN6_RP_UP_THRESHOLD, + GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100)); + + set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down)); + set(uncore, GEN6_RP_DOWN_THRESHOLD, + GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100)); + + set(uncore, GEN6_RP_CONTROL, + (INTEL_GEN(i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + +skip_hw_write: + rps->power.mode = new_power; + rps->power.up_threshold = threshold_up; + rps->power.down_threshold = threshold_down; +} + +static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) +{ + int new_power; + + new_power = rps->power.mode; + switch (rps->power.mode) { + case LOW_POWER: + if (val > rps->efficient_freq + 1 && + val > rps->cur_freq) + new_power = BETWEEN; + break; + + case BETWEEN: + if (val <= rps->efficient_freq && + val < rps->cur_freq) + new_power = LOW_POWER; + else if (val >= rps->rp0_freq && + val > rps->cur_freq) + new_power = HIGH_POWER; + break; + + case HIGH_POWER: + if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && + val < rps->cur_freq) + new_power = BETWEEN; + break; + } + /* Max/min bins are special */ + if (val <= rps->min_freq_softlimit) + new_power = LOW_POWER; + if (val >= rps->max_freq_softlimit) + new_power = HIGH_POWER; + + mutex_lock(&rps->power.mutex); + if (rps->power.interactive) + new_power = HIGH_POWER; + rps_set_power(rps, new_power); + mutex_unlock(&rps->power.mutex); +} + +void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) +{ + mutex_lock(&rps->power.mutex); + if (interactive) { + if (!rps->power.interactive++ && rps->active) + rps_set_power(rps, HIGH_POWER); + } else { + GEM_BUG_ON(!rps->power.interactive); + rps->power.interactive--; + } + mutex_unlock(&rps->power.mutex); +} + +static int gen6_rps_set(struct intel_rps *rps, u8 val) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 swreq; + + if (INTEL_GEN(i915) >= 9) + swreq = GEN9_FREQUENCY(val); + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + swreq = HSW_FREQUENCY(val); + else + swreq = (GEN6_FREQUENCY(val) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + set(uncore, GEN6_RPNSWREQ, swreq); + + return 0; +} + +static int vlv_rps_set(struct intel_rps *rps, u8 val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + int err; + + vlv_punit_get(i915); + err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); + vlv_punit_put(i915); + + return err; +} + +static int rps_set(struct intel_rps *rps, u8 val, bool update) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + int err; + + if (INTEL_GEN(i915) < 6) + return 0; + + if (val == rps->last_freq) + return 0; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + err = vlv_rps_set(rps, val); + else + err = gen6_rps_set(rps, val); + if (err) + return err; + + if (update) + gen6_rps_set_thresholds(rps, val); + rps->last_freq = val; + + return 0; +} + +void intel_rps_unpark(struct intel_rps *rps) +{ + u8 freq; + + if (!rps->enabled) + return; + + /* + * Use the user's desired frequency as a guide, but for better + * performance, jump directly to RPe as our starting frequency. 
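To put numbers on rps_set_power() above: both the evaluation interval and the threshold are programmed in GT timestamp units via GT_INTERVAL_FROM_US(), and the threshold register simply holds the busy time corresponding to the chosen percentage of the window. For the HIGH_POWER row:

	unsigned int ei_up = 10000;				/* 10 ms window */
	unsigned int threshold_up = 85;				/* % busy to upclock */
	unsigned int busy_us = ei_up * threshold_up / 100;	/* 8500 us */

	/* i.e. GEN6_RP_UP_THRESHOLD gets GT_INTERVAL_FROM_US(i915, 8500) */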
+ */ + mutex_lock(&rps->lock); + rps->active = true; + freq = max(rps->cur_freq, rps->efficient_freq), + freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit); + intel_rps_set(rps, freq); + rps->last_adj = 0; + mutex_unlock(&rps->lock); + + if (INTEL_GEN(rps_to_i915(rps)) >= 6) + rps_enable_interrupts(rps); + + if (IS_GEN(rps_to_i915(rps), 5)) + gen5_rps_update(rps); +} + +void intel_rps_park(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (!rps->enabled) + return; + + if (INTEL_GEN(i915) >= 6) + rps_disable_interrupts(rps); + + rps->active = false; + if (rps->last_freq <= rps->idle_freq) + return; + + /* + * The punit delays the write of the frequency and voltage until it + * determines the GPU is awake. During normal usage we don't want to + * waste power changing the frequency if the GPU is sleeping (rc6). + * However, the GPU and driver is now idle and we do not want to delay + * switching to minimum voltage (reducing power whilst idle) as we do + * not expect to be woken in the near future and so must flush the + * change by waking the device. + * + * We choose to take the media powerwell (either would do to trick the + * punit into committing the voltage change) as that takes a lot less + * power than the render powerwell. + */ + intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); + rps_set(rps, rps->idle_freq, false); + intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); +} + +void intel_rps_boost(struct i915_request *rq) +{ + struct intel_rps *rps = &rq->engine->gt->rps; + unsigned long flags; + + if (i915_request_signaled(rq) || !rps->active) + return; + + /* Serializes with i915_request_retire() */ + spin_lock_irqsave(&rq->lock, flags); + if (!i915_request_has_waitboost(rq) && + !dma_fence_is_signaled_locked(&rq->fence)) { + rq->flags |= I915_REQUEST_WAITBOOST; + + if (!atomic_fetch_inc(&rps->num_waiters) && + READ_ONCE(rps->cur_freq) < rps->boost_freq) + schedule_work(&rps->work); + + atomic_inc(&rps->boosts); + } + spin_unlock_irqrestore(&rq->lock, flags); +} + +int intel_rps_set(struct intel_rps *rps, u8 val) +{ + int err; + + lockdep_assert_held(&rps->lock); + GEM_BUG_ON(val > rps->max_freq); + GEM_BUG_ON(val < rps->min_freq); + + if (rps->active) { + err = rps_set(rps, val, true); + if (err) + return err; + + /* + * Make sure we continue to get interrupts + * until we hit the minimum or maximum frequencies. 
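The boost path above deserves a usage note: the first waiter on a still-unsignaled request takes num_waiters from zero to one and schedules the worker, which (in rps_work() later in this file) jumps straight to boost_freq instead of stepping. A hedged sketch of the caller side; the surrounding wait code here is illustrative, not quoted from the actual wait path:

	/* about to sleep on rq: ask RPS to spike the clocks */
	if (!i915_request_completed(rq))
		intel_rps_boost(rq);	/* no-op if rps inactive or already boosted */

	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);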
+ */ + if (INTEL_GEN(rps_to_i915(rps)) >= 6) { + struct intel_uncore *uncore = rps_to_uncore(rps); + + set(uncore, + GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val)); + + set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val)); + } + } + + rps->cur_freq = val; + return 0; +} + +static void gen6_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* All of these values are in units of 50MHz */ + + /* static values from HW: RP0 > RP1 > RPn (min_freq) */ + if (IS_GEN9_LP(i915)) { + u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); + + rps->rp0_freq = (rp_state_cap >> 16) & 0xff; + rps->rp1_freq = (rp_state_cap >> 8) & 0xff; + rps->min_freq = (rp_state_cap >> 0) & 0xff; + } else { + u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); + + rps->rp0_freq = (rp_state_cap >> 0) & 0xff; + rps->rp1_freq = (rp_state_cap >> 8) & 0xff; + rps->min_freq = (rp_state_cap >> 16) & 0xff; + } + + /* hw_max = RP0 until we check for overclocking */ + rps->max_freq = rps->rp0_freq; + + rps->efficient_freq = rps->rp1_freq; + if (IS_HASWELL(i915) || IS_BROADWELL(i915) || + IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + u32 ddcc_status = 0; + + if (sandybridge_pcode_read(i915, + HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status, NULL) == 0) + rps->efficient_freq = + clamp_t(u8, + (ddcc_status >> 8) & 0xff, + rps->min_freq, + rps->max_freq); + } + + if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + /* Store the frequency values in 16.66 MHZ units, which is + * the natural hardware unit for SKL + */ + rps->rp0_freq *= GEN9_FREQ_SCALER; + rps->rp1_freq *= GEN9_FREQ_SCALER; + rps->min_freq *= GEN9_FREQ_SCALER; + rps->max_freq *= GEN9_FREQ_SCALER; + rps->efficient_freq *= GEN9_FREQ_SCALER; + } +} + +static bool rps_reset(struct intel_rps *rps) +{ + /* force a reset */ + rps->power.mode = -1; + rps->last_freq = -1; + + if (rps_set(rps, rps->min_freq, true)) { + DRM_ERROR("Failed to reset RPS to initial values\n"); + return false; + } + + rps->cur_freq = rps->min_freq; + return true; +} + +/* See the Gen9_GT_PM_Programming_Guide doc for the below */ +static bool gen9_rps_enable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* Program defaults and thresholds for RPS */ + if (IS_GEN(i915, 9)) + intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, + GEN9_FREQUENCY(rps->rp1_freq)); + + /* 1 second timeout */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, + GT_INTERVAL_FROM_US(i915, 1000000)); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); + + return rps_reset(rps); +} + +static bool gen8_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, + HSW_FREQUENCY(rps->rp1_freq)); + + /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, + 100000000 / 128); /* 1 second timeout */ + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + return rps_reset(rps); +} + +static bool gen6_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* Power down if completely idle for over 50ms */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + return rps_reset(rps); +} + +static int chv_rps_max_freq(struct intel_rps *rps) +{ + struct drm_i915_private 
*i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); + + switch (RUNTIME_INFO(i915)->sseu.eu_total) { + case 8: + /* (2 * 4) config */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; + break; + case 12: + /* (2 * 6) config */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; + break; + case 16: + /* (2 * 8) config */ + default: + /* Setting (2 * 8) Min RP0 for any other combination */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; + break; + } + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static int chv_rps_rpe_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); + val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; + + return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; +} + +static int chv_rps_guar_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static u32 chv_rps_min_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); + val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static bool chv_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + /* 1: Program defaults and thresholds for RPS*/ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + /* 2: Enable RPS */ + intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + + /* Setting Fixed Bias */ + vlv_punit_get(i915); + + val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; + vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + + vlv_punit_put(i915); + + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + return rps_reset(rps); +} + +static int vlv_rps_guar_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rp1; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; + rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; + + return rp1; +} + +static int vlv_rps_max_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rp0; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; + /* Clamp to max */ + rp0 = min_t(u32, rp0, 0xea); + + return rp0; +} + +static int vlv_rps_rpe_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rpe; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); + rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); + rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; + + return rpe; +} + +static int vlv_rps_min_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; + /* + * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value + * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on + * a BYT-M B0 the above register contains 0xbf. Moreover when setting + * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 + * to make sure it matches what Punit accepts. + */ + return max_t(u32, val, 0xc0); +} + +static bool vlv_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_CONT); + + vlv_punit_get(i915); + + /* Setting Fixed Bias */ + val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; + vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + + vlv_punit_put(i915); + + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + return rps_reset(rps); +} + +static unsigned long __ips_gfx_val(struct intel_ips *ips) +{ + struct intel_rps *rps = container_of(ips, typeof(*rps), ips); + struct intel_uncore *uncore = rps_to_uncore(rps); + unsigned long t, corr, state1, corr2, state2; + u32 pxvid, ext_v; + + lockdep_assert_held(&mchdev_lock); + + pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); + pxvid = (pxvid >> 24) & 0x7f; + ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); + + state1 = ext_v; + + /* Revel in the empirically derived constants */ + + /* Correction factor in 1/100000 units */ + t = ips_mch_val(uncore); + if (t > 80) + corr = t * 2349 + 135940; + else if (t >= 50) + corr = t * 964 + 29317; + else /* < 50 */ + corr = t * 301 + 1004; + + corr = corr * 150142 * state1 / 10000 - 78642; + corr /= 100000; + corr2 = corr * ips->corr; + + state2 = corr2 * state1 / 10000; + state2 /= 100; /* convert to mW */ + + __gen5_ips_update(ips); + + return ips->gfx_power + state2; +} + +void intel_rps_enable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + if (IS_CHERRYVIEW(i915)) + rps->enabled = chv_rps_enable(rps); + else if (IS_VALLEYVIEW(i915)) + rps->enabled = vlv_rps_enable(rps); + else if (INTEL_GEN(i915) >= 9) + rps->enabled = gen9_rps_enable(rps); + else if (INTEL_GEN(i915) >= 8) + rps->enabled = gen8_rps_enable(rps); + else if (INTEL_GEN(i915) >= 6) + rps->enabled = gen6_rps_enable(rps); + else if (IS_IRONLAKE_M(i915)) + rps->enabled = gen5_rps_enable(rps); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + if (!rps->enabled) + return; + + WARN_ON(rps->max_freq < rps->min_freq); + WARN_ON(rps->idle_freq > rps->max_freq); + + WARN_ON(rps->efficient_freq < rps->min_freq); + WARN_ON(rps->efficient_freq > rps->max_freq); +} + +static void gen6_rps_disable(struct intel_rps *rps) +{ + set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); +} + +void intel_rps_disable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + rps->enabled = false; + + if (INTEL_GEN(i915) >= 6) + gen6_rps_disable(rps); + else if (IS_IRONLAKE_M(i915)) + gen5_rps_disable(rps); +} + +static int byt_gpu_freq(struct intel_rps *rps, int val) +{ + /* + * N = val - 0xb7 + * Slow = Fast = GPLL ref * N + */ + return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); +} + +static int byt_freq_opcode(struct intel_rps *rps, int val) +{ + return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; +} + +static int chv_gpu_freq(struct intel_rps *rps, int val) +{ + /* + * N = val / 2 + * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 + */ + return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); +} + +static int chv_freq_opcode(struct intel_rps *rps, int val) +{ + /* CHV needs even values */ + return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; +} + +int intel_gpu_freq(struct intel_rps *rps, int val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (INTEL_GEN(i915) >= 9) + return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, + GEN9_FREQ_SCALER); + else if (IS_CHERRYVIEW(i915)) + return chv_gpu_freq(rps, val); + else if (IS_VALLEYVIEW(i915)) + return byt_gpu_freq(rps, val); + else + return val * GT_FREQUENCY_MULTIPLIER; +} + +int intel_freq_opcode(struct intel_rps *rps, int val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if 
(INTEL_GEN(i915) >= 9) + return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, + GT_FREQUENCY_MULTIPLIER); + else if (IS_CHERRYVIEW(i915)) + return chv_freq_opcode(rps, val); + else if (IS_VALLEYVIEW(i915)) + return byt_freq_opcode(rps, val); + else + return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); +} + +static void vlv_init_gpll_ref_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + rps->gpll_ref_freq = + vlv_get_cck_clock(i915, "GPLL ref", + CCK_GPLL_CLOCK_CONTROL, + i915->czclk_freq); + + DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq); +} + +static void vlv_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + vlv_iosf_sb_get(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + vlv_init_gpll_ref_freq(rps); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + switch ((val >> 6) & 3) { + case 0: + case 1: + i915->mem_freq = 800; + break; + case 2: + i915->mem_freq = 1066; + break; + case 3: + i915->mem_freq = 1333; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + + rps->max_freq = vlv_rps_max_freq(rps); + rps->rp0_freq = rps->max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), + rps->max_freq); + + rps->efficient_freq = vlv_rps_rpe_freq(rps); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), + rps->efficient_freq); + + rps->rp1_freq = vlv_rps_guar_freq(rps); + DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), + rps->rp1_freq); + + rps->min_freq = vlv_rps_min_freq(rps); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), + rps->min_freq); + + vlv_iosf_sb_put(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); +} + +static void chv_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + vlv_iosf_sb_get(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + vlv_init_gpll_ref_freq(rps); + + val = vlv_cck_read(i915, CCK_FUSE_REG); + + switch ((val >> 2) & 0x7) { + case 3: + i915->mem_freq = 2000; + break; + default: + i915->mem_freq = 1600; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + + rps->max_freq = chv_rps_max_freq(rps); + rps->rp0_freq = rps->max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), + rps->max_freq); + + rps->efficient_freq = chv_rps_rpe_freq(rps); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), + rps->efficient_freq); + + rps->rp1_freq = chv_rps_guar_freq(rps); + DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), + rps->rp1_freq); + + rps->min_freq = chv_rps_min_freq(rps); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), + rps->min_freq); + + vlv_iosf_sb_put(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq | + rps->min_freq) & 1, + "Odd GPU freq values\n"); +} + +static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) +{ + ei->ktime = ktime_get_raw(); + ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); + ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); +} + +static u32 vlv_wa_c0_ei(struct 
intel_rps *rps, u32 pm_iir) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + const struct intel_rps_ei *prev = &rps->ei; + struct intel_rps_ei now; + u32 events = 0; + + if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) + return 0; + + vlv_c0_read(uncore, &now); + + if (prev->ktime) { + u64 time, c0; + u32 render, media; + + time = ktime_us_delta(now.ktime, prev->ktime); + + time *= rps_to_i915(rps)->czclk_freq; + + /* Workload can be split between render + media, + * e.g. SwapBuffers being blitted in X after being rendered in + * mesa. To account for this we need to combine both engines + * into our activity counter. + */ + render = now.render_c0 - prev->render_c0; + media = now.media_c0 - prev->media_c0; + c0 = max(render, media); + c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ + + if (c0 > time * rps->power.up_threshold) + events = GEN6_PM_RP_UP_THRESHOLD; + else if (c0 < time * rps->power.down_threshold) + events = GEN6_PM_RP_DOWN_THRESHOLD; + } + + rps->ei = now; + return events; +} + +static void rps_work(struct work_struct *work) +{ + struct intel_rps *rps = container_of(work, typeof(*rps), work); + struct intel_gt *gt = rps_to_gt(rps); + bool client_boost = false; + int new_freq, adj, min, max; + u32 pm_iir = 0; + + spin_lock_irq(>->irq_lock); + pm_iir = fetch_and_zero(&rps->pm_iir); + client_boost = atomic_read(&rps->num_waiters); + spin_unlock_irq(>->irq_lock); + + /* Make sure we didn't queue anything we're not going to process. */ + if ((pm_iir & rps->pm_events) == 0 && !client_boost) + goto out; + + mutex_lock(&rps->lock); + + pm_iir |= vlv_wa_c0_ei(rps, pm_iir); + + adj = rps->last_adj; + new_freq = rps->cur_freq; + min = rps->min_freq_softlimit; + max = rps->max_freq_softlimit; + if (client_boost) + max = rps->max_freq; + if (client_boost && new_freq < rps->boost_freq) { + new_freq = rps->boost_freq; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (adj > 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; + + if (new_freq >= rps->max_freq_softlimit) + adj = 0; + } else if (client_boost) { + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { + if (rps->cur_freq > rps->efficient_freq) + new_freq = rps->efficient_freq; + else if (rps->cur_freq > rps->min_freq_softlimit) + new_freq = rps->min_freq_softlimit; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { + if (adj < 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; + + if (new_freq <= rps->min_freq_softlimit) + adj = 0; + } else { /* unknown event */ + adj = 0; + } + + rps->last_adj = adj; + + /* + * Limit deboosting and boosting to keep ourselves at the extremes + * when in the respective power modes (i.e. slowly decrease frequencies + * while in the HIGH_POWER zone and slowly increase frequencies while + * in the LOW_POWER zone). On idle, we will hit the timeout and drop + * to the next level quickly, and conversely if busy we expect to + * hit a waitboost and rapidly switch into max power. 
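The last_adj bookkeeping above gives exponential ramping: every consecutive interrupt in the same direction doubles the step and anything else resets it, so a sustained load climbs by 1, 2, 4, 8... frequency bins per interrupt (2, 4, 8... on CherryView, which needs even encodings). Compressed:

	/* sketch: three up-threshold interrupts in a row, non-CHV */
	int adj = 0, i;

	for (i = 0; i < 3; i++)
		adj = adj > 0 ? 2 * adj : 1;	/* 1, then 2, then 4 */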
+ */ + if ((adj < 0 && rps->power.mode == HIGH_POWER) || + (adj > 0 && rps->power.mode == LOW_POWER)) + rps->last_adj = 0; + + /* sysfs frequency interfaces may have snuck in while servicing the + * interrupt + */ + new_freq += adj; + new_freq = clamp_t(int, new_freq, min, max); + + if (intel_rps_set(rps, new_freq)) { + DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); + rps->last_adj = 0; + } + + mutex_unlock(&rps->lock); + +out: + spin_lock_irq(>->irq_lock); + gen6_gt_pm_unmask_irq(gt, rps->pm_events); + spin_unlock_irq(>->irq_lock); +} + +void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) +{ + struct intel_gt *gt = rps_to_gt(rps); + const u32 events = rps->pm_events & pm_iir; + + lockdep_assert_held(>->irq_lock); + + if (unlikely(!events)) + return; + + gen6_gt_pm_mask_irq(gt, events); + + rps->pm_iir |= events; + schedule_work(&rps->work); +} + +void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) +{ + struct intel_gt *gt = rps_to_gt(rps); + + if (pm_iir & rps->pm_events) { + spin_lock(>->irq_lock); + gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events); + rps->pm_iir |= pm_iir & rps->pm_events; + schedule_work(&rps->work); + spin_unlock(>->irq_lock); + } + + if (INTEL_GEN(gt->i915) >= 8) + return; + + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + intel_engine_signal_breadcrumbs(gt->engine[VECS0]); + + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); +} + +void gen5_rps_irq_handler(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u32 busy_up, busy_down, max_avg, min_avg; + u8 new_freq; + + spin_lock(&mchdev_lock); + + intel_uncore_write16(uncore, + MEMINTRSTS, + intel_uncore_read(uncore, MEMINTRSTS)); + + intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); + busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); + max_avg = intel_uncore_read(uncore, RCBMAXAVG); + min_avg = intel_uncore_read(uncore, RCBMINAVG); + + /* Handle RCS change request from hw */ + new_freq = rps->cur_freq; + if (busy_up > max_avg) + new_freq++; + else if (busy_down < min_avg) + new_freq--; + new_freq = clamp(new_freq, + rps->min_freq_softlimit, + rps->max_freq_softlimit); + + if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq)) + rps->cur_freq = new_freq; + + spin_unlock(&mchdev_lock); +} + +void intel_rps_init_early(struct intel_rps *rps) +{ + mutex_init(&rps->lock); + mutex_init(&rps->power.mutex); + + INIT_WORK(&rps->work, rps_work); + + atomic_set(&rps->num_waiters, 0); +} + +void intel_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (IS_CHERRYVIEW(i915)) + chv_rps_init(rps); + else if (IS_VALLEYVIEW(i915)) + vlv_rps_init(rps); + else if (INTEL_GEN(i915) >= 6) + gen6_rps_init(rps); + else if (IS_IRONLAKE_M(i915)) + gen5_rps_init(rps); + + /* Derive initial user preferences/limits from the hardware limits */ + rps->max_freq_softlimit = rps->max_freq; + rps->min_freq_softlimit = rps->min_freq; + + /* After setting max-softlimit, find the overclock max freq */ + if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { + u32 params = 0; + + sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, + ¶ms, NULL); + if (params & BIT(31)) { /* OC supported */ + DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", + (rps->max_freq & 0xff) * 50, + (params & 0xff) * 50); + rps->max_freq = params & 0xff; + } + } + + /* Finally allow us to boost to max by default */ + rps->boost_freq 
= rps->max_freq; + rps->idle_freq = rps->min_freq; + rps->cur_freq = rps->idle_freq; + + rps->pm_intrmsk_mbz = 0; + + /* + * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. + */ + if (INTEL_GEN(i915) <= 7) + rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11) + rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; +} + +u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 cagf; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + cagf = (rpstat >> 8) & 0xff; + else if (INTEL_GEN(i915) >= 9) + cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; + else + cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; + + return cagf; +} + +static u32 read_cagf(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 freq; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + vlv_punit_get(i915); + freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + vlv_punit_put(i915); + } else { + freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1); + } + + return intel_rps_get_cagf(rps, freq); +} + +u32 intel_rps_read_actual_frequency(struct intel_rps *rps) +{ + struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm; + intel_wakeref_t wakeref; + u32 freq = 0; + + with_intel_runtime_pm_if_in_use(rpm, wakeref) + freq = intel_gpu_freq(rps, read_cagf(rps)); + + return freq; +} + +/* External interface for intel_ips.ko */ + +static struct drm_i915_private __rcu *ips_mchdev; + +/** + * Tells the intel_ips driver that the i915 driver is now loaded, if + * IPS got loaded first. + * + * This awkward dance is so that neither module has to depend on the + * other in order for IPS to do the appropriate communication of + * GPU turbo limits to i915. + */ +static void +ips_ping_for_i915_load(void) +{ + void (*link)(void); + + link = symbol_get(ips_link_to_i915_driver); + if (link) { + link(); + symbol_put(ips_link_to_i915_driver); + } +} + +void intel_rps_driver_register(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + /* + * We only register the i915 ips part with intel-ips once everything is + * set up, to avoid intel-ips sneaking in and reading bogus values. + */ + if (IS_GEN(gt->i915, 5)) { + GEM_BUG_ON(ips_mchdev); + rcu_assign_pointer(ips_mchdev, gt->i915); + ips_ping_for_i915_load(); + } +} + +void intel_rps_driver_unregister(struct intel_rps *rps) +{ + if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps)) + rcu_assign_pointer(ips_mchdev, NULL); +} + +static struct drm_i915_private *mchdev_get(void) +{ + struct drm_i915_private *i915; + + rcu_read_lock(); + i915 = rcu_dereference(ips_mchdev); + if (!kref_get_unless_zero(&i915->drm.ref)) + i915 = NULL; + rcu_read_unlock(); + + return i915; +} + +/** + * i915_read_mch_val - return value for IPS use + * + * Calculate and return a value for the IPS driver to use when deciding whether + * we have thermal and power headroom to increase CPU or GPU power budget. 
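intel_rps_read_actual_frequency() above composes the two halves of this file's unit handling: read_cagf() extracts the raw ratio (the CAGF field, or the Punit status word on VLV/CHV), and intel_gpu_freq() from earlier in this file scales it to MHz. A worked example, assuming the usual GT_FREQUENCY_MULTIPLIER of 50 and GEN9_FREQ_SCALER of 3:

	int val = 36;	/* raw value, in the units the *_freq fields use */

	/* gen6-8: 50 MHz bins */
	int mhz_bdw = val * 50;				/* 1800 MHz */

	/* gen9+: 16.67 MHz bins, since the fields were pre-multiplied by 3 */
	int mhz_skl = DIV_ROUND_CLOSEST(val * 50, 3);	/* 600 MHz */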
+ */ +unsigned long i915_read_mch_val(void) +{ + struct drm_i915_private *i915; + unsigned long chipset_val = 0; + unsigned long graphics_val = 0; + intel_wakeref_t wakeref; + + i915 = mchdev_get(); + if (!i915) + return 0; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + struct intel_ips *ips = &i915->gt.rps.ips; + + spin_lock_irq(&mchdev_lock); + chipset_val = __ips_chipset_val(ips); + graphics_val = __ips_gfx_val(ips); + spin_unlock_irq(&mchdev_lock); + } + + drm_dev_put(&i915->drm); + return chipset_val + graphics_val; +} +EXPORT_SYMBOL_GPL(i915_read_mch_val); + +/** + * i915_gpu_raise - raise GPU frequency limit + * + * Raise the limit; IPS indicates we have thermal headroom. + */ +bool i915_gpu_raise(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + if (rps->max_freq_softlimit < rps->max_freq) + rps->max_freq_softlimit++; + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return true; +} +EXPORT_SYMBOL_GPL(i915_gpu_raise); + +/** + * i915_gpu_lower - lower GPU frequency limit + * + * IPS indicates we're close to a thermal limit, so throttle back the GPU + * frequency maximum. + */ +bool i915_gpu_lower(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + if (rps->max_freq_softlimit > rps->min_freq) + rps->max_freq_softlimit--; + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return true; +} +EXPORT_SYMBOL_GPL(i915_gpu_lower); + +/** + * i915_gpu_busy - indicate GPU business to IPS + * + * Tell the IPS driver whether or not the GPU is busy. + */ +bool i915_gpu_busy(void) +{ + struct drm_i915_private *i915; + bool ret; + + i915 = mchdev_get(); + if (!i915) + return false; + + ret = i915->gt.awake; + + drm_dev_put(&i915->drm); + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_busy); + +/** + * i915_gpu_turbo_disable - disable graphics turbo + * + * Disable graphics turbo by resetting the max frequency and setting the + * current frequency to the default. 
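A hedged sketch of the expected IPS-side policy loop over the raise/lower/busy exports above (illustrative only; too_hot is a hypothetical condition and the real policy lives in the intel_ips driver):

	if (too_hot)			/* hypothetical thermal check */
		i915_gpu_lower();	/* shrink max_freq_softlimit */
	else if (i915_gpu_busy())	/* GT awake and doing work */
		i915_gpu_raise();	/* restore the headroom */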
+ */ +bool i915_gpu_turbo_disable(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + bool ret; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + rps->max_freq_softlimit = rps->min_freq; + ret = gen5_rps_set(&i915->gt.rps, rps->min_freq); + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h new file mode 100644 index 000000000000..dfa98194f3b2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RPS_H +#define INTEL_RPS_H + +#include "intel_rps_types.h" + +struct i915_request; + +void intel_rps_init_early(struct intel_rps *rps); +void intel_rps_init(struct intel_rps *rps); + +void intel_rps_driver_register(struct intel_rps *rps); +void intel_rps_driver_unregister(struct intel_rps *rps); + +void intel_rps_enable(struct intel_rps *rps); +void intel_rps_disable(struct intel_rps *rps); + +void intel_rps_park(struct intel_rps *rps); +void intel_rps_unpark(struct intel_rps *rps); +void intel_rps_boost(struct i915_request *rq); + +int intel_rps_set(struct intel_rps *rps, u8 val); +void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); + +int intel_gpu_freq(struct intel_rps *rps, int val); +int intel_freq_opcode(struct intel_rps *rps, int val); +u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1); +u32 intel_rps_read_actual_frequency(struct intel_rps *rps); + +void gen5_rps_irq_handler(struct intel_rps *rps); +void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); +void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); + +#endif /* INTEL_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h new file mode 100644 index 000000000000..c2e279154bd5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RPS_TYPES_H +#define INTEL_RPS_TYPES_H + +#include <linux/atomic.h> +#include <linux/ktime.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct intel_ips { + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + u64 last_time2; + unsigned long gfx_power; + u8 corr; + + int c, m; +}; + +struct intel_rps_ei { + ktime_t ktime; + u32 render_c0; + u32 media_c0; +}; + +struct intel_rps { + struct mutex lock; /* protects enabling and the worker */ + + /* + * work, interrupts_enabled and pm_iir are protected by + * dev_priv->irq_lock + */ + struct work_struct work; + bool enabled; + bool active; + u32 pm_iir; + + /* PM interrupt bits that should never be masked */ + u32 pm_intrmsk_mbz; + u32 pm_events; + + /* Frequencies are stored in potentially platform dependent multiples. + * In other words, *_freq needs to be multiplied by X to be interesting. + * Soft limits are those which are used for the dynamic reclocking done + * by the driver (raise frequencies under heavy loads, and lower for + * lighter loads). Hard limits are those imposed by the hardware. + * + * A distinction is made for overclocking, which is never enabled by + * default, and is considered to be above the hard limit if it's + * possible at all. 
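For example (an illustrative aside, not part of the patch): on SNB/IVB/HSW the multiple is 50MHz, which is why intel_rps_init() above prints "(rps->max_freq & 0xff) * 50" as MHz. Callers convert through the helpers declared in intel_rps.h rather than hard-coding the platform multiple:

	int mhz = intel_gpu_freq(rps, rps->cur_freq);	/* e.g. 12 -> 600 */
	int code = intel_freq_opcode(rps, mhz);		/* 600 -> 12 again */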
*/ + u8 cur_freq; /* Current frequency (cached, may not == HW) */ + u8 last_freq; /* Last SWREQ frequency */ + u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ + u8 max_freq_softlimit; /* Max frequency permitted by the driver */ + u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ + u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 boost_freq; /* Frequency to request when wait boosting */ + u8 idle_freq; /* Frequency to request when we are idle */ + u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ + u8 rp1_freq; /* "less than" RP0 power/frequency */ + u8 rp0_freq; /* Non-overclocked max frequency. */ + u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ + + int last_adj; + + struct { + struct mutex mutex; + + enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; + unsigned int interactive; + + u8 up_threshold; /* Current %busy required to upclock */ + u8 down_threshold; /* Current %busy required to downclock */ + } power; + + atomic_t num_waiters; + atomic_t boosts; + + /* manual wa residency calculations */ + struct intel_rps_ei ei; + struct intel_ips ips; +}; + +#endif /* INTEL_RPS_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 0f959694303c..ee5dc4fbdeb9 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -4,17 +4,20 @@ * Copyright © 2016-2018 Intel Corporation */ -#include "gt/intel_gt_types.h" - #include "i915_drv.h" #include "i915_active.h" #include "i915_syncmap.h" -#include "gt/intel_timeline.h" +#include "intel_gt.h" +#include "intel_ring.h" +#include "intel_timeline.h" #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) +#define CACHELINE_BITS 6 +#define CACHELINE_FREE CACHELINE_BITS + struct intel_timeline_hwsp { struct intel_gt *gt; struct intel_gt_timelines *gt_timelines; @@ -23,14 +26,6 @@ struct intel_timeline_hwsp { u64 free_bitmap; }; -struct intel_timeline_cacheline { - struct i915_active active; - struct intel_timeline_hwsp *hwsp; - void *vaddr; -#define CACHELINE_BITS 6 -#define CACHELINE_FREE CACHELINE_BITS -}; - static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; @@ -133,7 +128,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); i915_active_fini(&cl->active); - kfree(cl); + kfree_rcu(cl, rcu); } __i915_active_call @@ -254,7 +249,7 @@ int intel_timeline_init(struct intel_timeline *timeline, mutex_init(&timeline->mutex); - INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex); + INIT_ACTIVE_FENCE(&timeline->last_request); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); @@ -262,7 +257,7 @@ int intel_timeline_init(struct intel_timeline *timeline, return 0; } -static void timelines_init(struct intel_gt *gt) +void intel_gt_init_timelines(struct intel_gt *gt) { struct intel_gt_timelines *timelines = &gt->timelines; @@ -273,15 +268,11 @@ static void timelines_init(struct intel_gt *gt) INIT_LIST_HEAD(&timelines->hwsp_free_list); } -void intel_timelines_init(struct drm_i915_private *i915) -{ - timelines_init(&i915->gt); -} - void intel_timeline_fini(struct intel_timeline *timeline) { GEM_BUG_ON(atomic_read(&timeline->pin_count)); GEM_BUG_ON(!list_empty(&timeline->requests)); + GEM_BUG_ON(timeline->retire); if (timeline->hwsp_cacheline) 
cacheline_free(timeline->hwsp_cacheline); @@ -337,34 +328,52 @@ int intel_timeline_pin(struct intel_timeline *tl) void intel_timeline_enter(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; - unsigned long flags; + /* + * Pretend we are serialised by the timeline->mutex. + * + * While generally true, there are a few exceptions to the rule + * for the engine->kernel_context being used to manage power + * transitions. As the engine_park may be called from under any + * timeline, it uses the power mutex as a global serialisation + * lock to prevent any other request entering its timeline. + * + * The rule is generally tl->mutex, otherwise engine->wakeref.mutex. + * + * However, intel_gt_retire_request() does not know which engine + * it is retiring along and so cannot partake in the engine-pm + * barrier, and there we use the tl->active_count as a means to + * pin the timeline in the active_list while the locks are dropped. + * Ergo, as that is outside of the engine-pm barrier, we need to + * use atomic to manipulate tl->active_count. + */ lockdep_assert_held(&tl->mutex); - GEM_BUG_ON(!atomic_read(&tl->pin_count)); - if (tl->active_count++) + + if (atomic_add_unless(&tl->active_count, 1, 0)) return; - GEM_BUG_ON(!tl->active_count); /* overflow? */ - spin_lock_irqsave(&timelines->lock, flags); - list_add(&tl->link, &timelines->active_list); - spin_unlock_irqrestore(&timelines->lock, flags); + spin_lock(&timelines->lock); + if (!atomic_fetch_inc(&tl->active_count)) + list_add_tail(&tl->link, &timelines->active_list); + spin_unlock(&timelines->lock); } void intel_timeline_exit(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; - unsigned long flags; + /* See intel_timeline_enter() */ lockdep_assert_held(&tl->mutex); - GEM_BUG_ON(!tl->active_count); - if (--tl->active_count) + GEM_BUG_ON(!atomic_read(&tl->active_count)); + if (atomic_add_unless(&tl->active_count, -1, 1)) return; - spin_lock_irqsave(&timelines->lock, flags); - list_del(&tl->link); - spin_unlock_irqrestore(&timelines->lock, flags); + spin_lock(&timelines->lock); + if (atomic_dec_and_test(&tl->active_count)) + list_del(&tl->link); + spin_unlock(&timelines->lock); /* * Since this timeline is idle, all barriers upon which we were waiting @@ -500,46 +509,35 @@ int intel_timeline_read_hwsp(struct i915_request *from, struct i915_request *to, u32 *hwsp) { - struct intel_timeline *tl; + struct intel_timeline_cacheline *cl; int err; + GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline)); + rcu_read_lock(); - tl = rcu_dereference(from->timeline); - if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref)) - tl = NULL; + cl = rcu_dereference(from->hwsp_cacheline); + if (unlikely(!i915_active_acquire_if_busy(&cl->active))) + goto unlock; /* seqno wrapped and completed! 
*/ + if (unlikely(i915_request_completed(from))) + goto release; rcu_read_unlock(); - if (!tl) /* already completed */ - return 1; - GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl); - - err = -EBUSY; - if (mutex_trylock(&tl->mutex)) { - struct intel_timeline_cacheline *cl = from->hwsp_cacheline; - - if (i915_request_completed(from)) { - err = 1; - goto unlock; - } + err = cacheline_ref(cl, to); + if (err) + goto out; - err = cacheline_ref(cl, to); - if (err) - goto unlock; + *hwsp = i915_ggtt_offset(cl->hwsp->vma) + + ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES; - if (likely(cl == tl->hwsp_cacheline)) { - *hwsp = tl->hwsp_offset; - } else { /* across a seqno wrap, recover the original offset */ - *hwsp = i915_ggtt_offset(cl->hwsp->vma) + - ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * - CACHELINE_BYTES; - } +out: + i915_active_release(&cl->active); + return err; +release: + i915_active_release(&cl->active); unlock: - mutex_unlock(&tl->mutex); - } - intel_timeline_put(tl); - - return err; + rcu_read_unlock(); + return 1; } void intel_timeline_unpin(struct intel_timeline *tl) @@ -562,7 +560,7 @@ void __intel_timeline_free(struct kref *kref) kfree_rcu(timeline, rcu); } -static void timelines_fini(struct intel_gt *gt) +void intel_gt_fini_timelines(struct intel_gt *gt) { struct intel_gt_timelines *timelines = &gt->timelines; @@ -570,11 +568,6 @@ static void timelines_fini(struct intel_gt *gt) GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); } -void intel_timelines_fini(struct drm_i915_private *i915) -{ - timelines_fini(&i915->gt); -} - #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "gt/selftests/mock_timeline.c" #include "gt/selftest_timeline.c" diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h index f583af1ba18d..f5b7eade3809 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -88,7 +88,7 @@ int intel_timeline_read_hwsp(struct i915_request *from, struct i915_request *until, u32 *hwsp_offset); -void intel_timelines_init(struct drm_i915_private *i915); -void intel_timelines_fini(struct drm_i915_private *i915); +void intel_gt_init_timelines(struct intel_gt *gt); +void intel_gt_fini_timelines(struct intel_gt *gt); #endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index 98d9ee166379..02181c5020db 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -10,14 +10,15 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/rcupdate.h> #include <linux/types.h> #include "i915_active_types.h" -struct drm_i915_private; struct i915_vma; -struct intel_timeline_cacheline; struct i915_syncmap; +struct intel_gt; +struct intel_timeline_hwsp; struct intel_timeline { u64 fence_context; @@ -42,7 +43,7 @@ struct intel_timeline { * from the intel_context caller plus internal atomicity. */ atomic_t pin_count; - unsigned int active_count; + atomic_t active_count; const u32 *hwsp_seqno; struct i915_vma *hwsp_ggtt; @@ -66,6 +67,9 @@ struct intel_timeline { */ struct i915_active_fence last_request; + /** A chain of completed timelines ready for early retirement. 
*/ + struct intel_timeline *retire; + /** * We track the most recent seqno that we wait on in every context so * that we only have to emit a new await and dependency on a more @@ -84,4 +88,13 @@ struct intel_timeline { struct rcu_head rcu; }; +struct intel_timeline_cacheline { + struct i915_active active; + + struct intel_timeline_hwsp *hwsp; + void *vaddr; + + struct rcu_head rcu; +}; + #endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index af8a8183154a..195ccf7db272 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -6,7 +6,9 @@ #include "i915_drv.h" #include "intel_context.h" +#include "intel_engine_pm.h" #include "intel_gt.h" +#include "intel_ring.h" #include "intel_workarounds.h" /** @@ -145,21 +147,27 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) } } -static void -wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, - u32 val) +static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, + u32 val, u32 read_mask) { struct i915_wa wa = { .reg = reg, .mask = mask, .val = val, - .read = mask, + .read = read_mask, }; _wa_add(wal, &wa); } static void +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, + u32 val) +{ + wa_add(wal, reg, mask, val, mask); +} + +static void wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val)); @@ -567,9 +575,24 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + u32 val; + /* Wa_1409142259:tgl */ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); + + /* Wa_1604555607:tgl */ + val = intel_uncore_read(engine->uncore, FF_MODE2); + val &= ~FF_MODE2_TDS_TIMER_MASK; + val |= FF_MODE2_TDS_TIMER_128; + /* + * FIXME: FF_MODE2 register is not readable till TGL B0. We can + * enable verification of WA from the later steppings, which enables + * the read of FF_MODE2. + */ + wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val, + IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 
0 : + FF_MODE2_TDS_TIMER_MASK); } static void @@ -1215,6 +1238,26 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) static void tgl_whitelist_build(struct intel_engine_cs *engine) { + struct i915_wa_list *w = &engine->whitelist; + + switch (engine->class) { + case RENDER_CLASS: + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl + * + * This covers 4 registers which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); + break; + default: + break; + } } void intel_engine_init_whitelist(struct intel_engine_cs *engine) @@ -1294,6 +1337,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN6_RC_SLEEP_PSMI_CONTROL, GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | GEN8_RC_SEMA_IDLE_MSG_DISABLE); + + /* + * Wa_1606679103:tgl + * (see also Wa_1606682166:icl) + */ + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_SAMPLER_PREFETCH); } if (IS_GEN(i915, 11)) { @@ -1553,7 +1604,9 @@ static int engine_wa_list_verify(struct intel_context *ce, if (IS_ERR(vma)) return PTR_ERR(vma); + intel_engine_pm_get(ce->engine); rq = intel_context_create_request(ce); + intel_engine_pm_put(ce->engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_vma; @@ -1563,16 +1616,17 @@ static int engine_wa_list_verify(struct intel_context *ce, if (err) goto err_vma; + i915_request_get(rq); i915_request_add(rq); if (i915_request_wait(rq, 0, HZ / 5) < 0) { err = -ETIME; - goto err_vma; + goto err_rq; } results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(results)) { err = PTR_ERR(results); - goto err_vma; + goto err_rq; } err = 0; @@ -1586,6 +1640,8 @@ static int engine_wa_list_verify(struct intel_context *ce, i915_gem_object_unpin_map(vma->obj); +err_rq: + i915_request_put(rq); err_vma: i915_vma_unpin(vma); i915_vma_put(vma); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 123db2c3f956..4e1eafa94be9 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -23,6 +23,7 @@ */ #include "gem/i915_gem_context.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "intel_context.h" @@ -76,7 +77,7 @@ static void advance(struct i915_request *request) i915_request_mark_complete(request); GEM_BUG_ON(!i915_request_completed(request)); - intel_engine_queue_breadcrumbs(request->engine); + intel_engine_signal_breadcrumbs(request->engine); } static void hw_delay_complete(struct timer_list *t) @@ -206,16 +207,12 @@ static void mock_reset_prepare(struct intel_engine_cs *engine) { } -static void mock_reset(struct intel_engine_cs *engine, bool stalled) +static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled) { GEM_BUG_ON(stalled); } -static void mock_reset_finish(struct intel_engine_cs *engine) -{ -} - -static void mock_cancel_requests(struct intel_engine_cs *engine) +static void mock_reset_cancel(struct intel_engine_cs *engine) { struct i915_request *request; unsigned long flags; @@ -233,6 +230,24 @@ static void mock_cancel_requests(struct intel_engine_cs *engine) spin_unlock_irqrestore(&engine->active.lock, flags); } +static void mock_reset_finish(struct intel_engine_cs *engine) +{ +} + +static void mock_engine_release(struct intel_engine_cs *engine) +{ + struct mock_engine *mock = + container_of(engine, typeof(*mock), base); + + GEM_BUG_ON(timer_pending(&mock->hw_delay)); + + 
intel_context_unpin(engine->kernel_context); + intel_context_put(engine->kernel_context); + + intel_engine_fini_retire(engine); + intel_engine_fini_breadcrumbs(engine); +} + struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, const char *name, int id) @@ -264,9 +279,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.submit_request = mock_submit_request; engine->base.reset.prepare = mock_reset_prepare; - engine->base.reset.reset = mock_reset; + engine->base.reset.rewind = mock_reset_rewind; + engine->base.reset.cancel = mock_reset_cancel; engine->base.reset.finish = mock_reset_finish; - engine->base.cancel_requests = mock_cancel_requests; + + engine->base.release = mock_engine_release; i915->gt.engine[id] = &engine->base; i915->gt.engine_class[0][id] = &engine->base; @@ -289,6 +306,7 @@ int mock_engine_init(struct intel_engine_cs *engine) intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init__pm(engine); + intel_engine_init_retire(engine); intel_engine_pool_init(&engine->pool); ce = create_kernel_context(engine); @@ -320,18 +338,3 @@ void mock_engine_flush(struct intel_engine_cs *engine) void mock_engine_reset(struct intel_engine_cs *engine) { } - -void mock_engine_free(struct intel_engine_cs *engine) -{ - struct mock_engine *mock = - container_of(engine, typeof(*mock), base); - - GEM_BUG_ON(timer_pending(&mock->hw_delay)); - - intel_context_unpin(engine->kernel_context); - intel_context_put(engine->kernel_context); - - intel_engine_fini_breadcrumbs(engine); - - kfree(engine); -} diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index f63a26a3e620..e874dfaa5316 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -5,6 +5,7 @@ */ #include "i915_selftest.h" +#include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "intel_gt.h" @@ -47,35 +48,36 @@ static int context_sync(struct intel_context *ce) mutex_lock(&tl->mutex); do { - struct dma_fence *fence; + struct i915_request *rq; long timeout; - fence = i915_active_fence_get(&tl->last_request); - if (!fence) + if (list_empty(&tl->requests)) break; - timeout = dma_fence_wait_timeout(fence, false, HZ / 10); + rq = list_last_entry(&tl->requests, typeof(*rq), link); + i915_request_get(rq); + + timeout = i915_request_wait(rq, 0, HZ / 10); if (timeout < 0) err = timeout; else - i915_request_retire_upto(to_request(fence)); + i915_request_retire_upto(rq); - dma_fence_put(fence); + i915_request_put(rq); } while (!err); mutex_unlock(&tl->mutex); return err; } -static int __live_context_size(struct intel_engine_cs *engine, - struct i915_gem_context *fixme) +static int __live_context_size(struct intel_engine_cs *engine) { struct intel_context *ce; struct i915_request *rq; void *vaddr; int err; - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); @@ -103,9 +105,6 @@ static int __live_context_size(struct intel_engine_cs *engine, * * TLDR; this overlaps with the execlists redzone. 
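In outline, the redzone check below reduces to this sketch (an illustration only; the verification half of the test is not visible in this hunk):

	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
	/* ... force a context save with a kernel request ... */
	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
		; /* the HW wrote past the declared context_size */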
*/ - if (HAS_EXECLISTS(engine->i915)) - vaddr += LRC_HEADER_PAGES * PAGE_SIZE; - vaddr += engine->context_size - I915_GTT_PAGE_SIZE; memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); @@ -121,7 +120,7 @@ static int __live_context_size(struct intel_engine_cs *engine, goto err_unpin; /* Force the context switch */ - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -146,7 +145,6 @@ static int live_context_size(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; enum intel_engine_id id; int err = 0; @@ -155,10 +153,6 @@ static int live_context_size(void *arg) * HW tries to write past the end of one. */ - fixme = kernel_context(gt->i915); - if (IS_ERR(fixme)) - return PTR_ERR(fixme); - for_each_engine(engine, gt, id) { struct { struct drm_i915_gem_object *state; @@ -183,7 +177,7 @@ static int live_context_size(void *arg) /* Overlaps with the execlists redzone */ engine->context_size += I915_GTT_PAGE_SIZE; - err = __live_context_size(engine, fixme); + err = __live_context_size(engine); engine->context_size -= I915_GTT_PAGE_SIZE; @@ -196,13 +190,12 @@ static int live_context_size(void *arg) break; } - kernel_context_close(fixme); return err; } -static int __live_active_context(struct intel_engine_cs *engine, - struct i915_gem_context *fixme) +static int __live_active_context(struct intel_engine_cs *engine) { + unsigned long saved_heartbeat; struct intel_context *ce; int pass; int err; @@ -226,40 +219,55 @@ static int __live_active_context(struct intel_engine_cs *engine, return -EINVAL; } - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); + saved_heartbeat = engine->props.heartbeat_interval_ms; + engine->props.heartbeat_interval_ms = 0; + for (pass = 0; pass <= 2; pass++) { struct i915_request *rq; + intel_engine_pm_get(engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err; + goto out_engine; } err = request_sync(rq); if (err) - goto err; + goto out_engine; /* Context will be kept active until after an idle-barrier. 
*/ if (i915_active_is_idle(&ce->active)) { pr_err("context is not active; expected idle-barrier (%s pass %d)\n", engine->name, pass); err = -EINVAL; - goto err; + goto out_engine; } if (!intel_engine_pm_is_awake(engine)) { pr_err("%s is asleep before idle-barrier\n", engine->name); err = -EINVAL; - goto err; + goto out_engine; } + +out_engine: + intel_engine_pm_put(engine); + if (err) + goto err; } /* Now make sure our idle-barriers are flushed */ + err = intel_engine_flush_barriers(engine); + if (err) + goto err; + + /* Wait for the barrier and in the process wait for engine to park */ err = context_sync(engine->kernel_context); if (err) goto err; @@ -269,12 +277,15 @@ static int __live_active_context(struct intel_engine_cs *engine, err = -EINVAL; } + intel_engine_pm_flush(engine); + if (intel_engine_pm_is_awake(engine)) { struct drm_printer p = drm_debug_printer(__func__); intel_engine_dump(engine, &p, - "%s is still awake after idle-barriers\n", - engine->name); + "%s is still awake:%d after idle-barriers\n", + engine->name, + atomic_read(&engine->wakeref.count)); GEM_TRACE_DUMP(); err = -EINVAL; @@ -282,6 +293,7 @@ static int __live_active_context(struct intel_engine_cs *engine, } err: + engine->props.heartbeat_interval_ms = saved_heartbeat; intel_context_put(ce); return err; } @@ -290,23 +302,11 @@ static int live_active_context(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; enum intel_engine_id id; - struct drm_file *file; int err = 0; - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - fixme = live_context(gt->i915, file); - if (IS_ERR(fixme)) { - err = PTR_ERR(fixme); - goto out_file; - } - for_each_engine(engine, gt, id) { - err = __live_active_context(engine, fixme); + err = __live_active_context(engine); if (err) break; @@ -315,8 +315,6 @@ static int live_active_context(void *arg) break; } -out_file: - mock_file_free(gt->i915, file); return err; } @@ -348,10 +346,10 @@ unpin: return err; } -static int __live_remote_context(struct intel_engine_cs *engine, - struct i915_gem_context *fixme) +static int __live_remote_context(struct intel_engine_cs *engine) { struct intel_context *local, *remote; + unsigned long saved_heartbeat; int pass; int err; @@ -363,16 +361,26 @@ static int __live_remote_context(struct intel_engine_cs *engine, * clobber the idle-barrier. 
*/ - remote = intel_context_create(fixme, engine); + if (intel_engine_pm_is_awake(engine)) { + pr_err("%s is awake before starting %s!\n", + engine->name, __func__); + return -EINVAL; + } + + remote = intel_context_create(engine); if (IS_ERR(remote)) return PTR_ERR(remote); - local = intel_context_create(fixme, engine); + local = intel_context_create(engine); if (IS_ERR(local)) { err = PTR_ERR(local); goto err_remote; } + saved_heartbeat = engine->props.heartbeat_interval_ms; + engine->props.heartbeat_interval_ms = 0; + intel_engine_pm_get(engine); + for (pass = 0; pass <= 2; pass++) { err = __remote_sync(local, remote); if (err) @@ -390,6 +398,9 @@ static int __live_remote_context(struct intel_engine_cs *engine, } } + intel_engine_pm_put(engine); + engine->props.heartbeat_interval_ms = saved_heartbeat; + intel_context_put(local); err_remote: intel_context_put(remote); @@ -400,23 +411,11 @@ static int live_remote_context(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; enum intel_engine_id id; - struct drm_file *file; int err = 0; - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - fixme = live_context(gt->i915, file); - if (IS_ERR(fixme)) { - err = PTR_ERR(fixme); - goto out_file; - } - for_each_engine(engine, gt, id) { - err = __live_remote_context(engine, fixme); + err = __live_remote_context(engine); if (err) break; @@ -425,8 +424,6 @@ static int live_remote_context(void *arg) break; } -out_file: - mock_file_free(gt->i915, file); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index 3880f07c29b8..f88e445a1cae 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -4,7 +4,365 @@ * Copyright © 2018 Intel Corporation */ -#include "../i915_selftest.h" +#include <linux/sort.h> + +#include "intel_gt_pm.h" +#include "intel_rps.h" + +#include "i915_selftest.h" +#include "selftests/igt_flush_test.h" + +#define COUNT 5 + +static int cmp_u32(const void *A, const void *B) +{ + const u32 *a = A, *b = B; + + return *a - *b; +} + +static void perf_begin(struct intel_gt *gt) +{ + intel_gt_pm_get(gt); + + /* Boost gpufreq to max [waitboost] and keep it fixed */ + atomic_inc(&gt->rps.num_waiters); + schedule_work(&gt->rps.work); + flush_work(&gt->rps.work); +} + +static int perf_end(struct intel_gt *gt) +{ + atomic_dec(&gt->rps.num_waiters); + intel_gt_pm_put(gt); + + return igt_flush_test(gt->i915); +} + +static int write_timestamp(struct i915_request *rq, int slot) +{ + u32 cmd; + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; + if (INTEL_GEN(rq->i915) >= 8) + cmd++; + *cs++ = cmd; + *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); + *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32); + *cs++ = 0; + + intel_ring_advance(rq, cs); + + return 0; +} + +static struct i915_vma *create_empty_batch(struct intel_context *ce) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *cs; + int err; + + obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cs = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_put; + } + + cs[0] = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(obj); + + vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + 
goto err_unpin; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_unpin; + + i915_gem_object_unpin_map(obj); + return vma; + +err_unpin: + i915_gem_object_unpin_map(obj); +err_put: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static u32 trifilter(u32 *a) +{ + u64 sum; + + sort(a, COUNT, sizeof(*a), cmp_u32, NULL); + + sum = mul_u32_u32(a[2], 2); + sum += a[1]; + sum += a[3]; + + return sum >> 2; +} + +static int perf_mi_bb_start(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ + return 0; + + perf_begin(gt); + for_each_engine(engine, gt, id) { + struct intel_context *ce = engine->kernel_context; + struct i915_vma *batch; + u32 cycles[COUNT]; + int i; + + intel_engine_pm_get(engine); + + batch = create_empty_batch(ce); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + intel_engine_pm_put(engine); + break; + } + + err = i915_vma_sync(batch); + if (err) { + intel_engine_pm_put(engine); + i915_vma_put(batch); + break; + } + + for (i = 0; i < ARRAY_SIZE(cycles); i++) { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + err = write_timestamp(rq, 2); + if (err) + goto out; + + err = rq->engine->emit_bb_start(rq, + batch->node.start, 8, + 0); + if (err) + goto out; + + err = write_timestamp(rq, 3); + if (err) + goto out; + +out: + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -EIO; + i915_request_put(rq); + if (err) + break; + + cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2]; + } + i915_vma_put(batch); + intel_engine_pm_put(engine); + if (err) + break; + + pr_info("%s: MI_BB_START cycles: %u\n", + engine->name, trifilter(cycles)); + } + if (perf_end(gt)) + err = -EIO; + + return err; +} + +static struct i915_vma *create_nop_batch(struct intel_context *ce) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *cs; + int err; + + obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cs = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_put; + } + + memset(cs, 0, SZ_64K); + cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(obj); + + vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_unpin; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_unpin; + + i915_gem_object_unpin_map(obj); + return vma; + +err_unpin: + i915_gem_object_unpin_map(obj); +err_put: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static int perf_mi_noop(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ + return 0; + + perf_begin(gt); + for_each_engine(engine, gt, id) { + struct intel_context *ce = engine->kernel_context; + struct i915_vma *base, *nop; + u32 cycles[COUNT]; + int i; + + intel_engine_pm_get(engine); + + base = create_empty_batch(ce); + if (IS_ERR(base)) { + err = PTR_ERR(base); + intel_engine_pm_put(engine); + break; + } + + err = i915_vma_sync(base); + if (err) { + i915_vma_put(base); + intel_engine_pm_put(engine); + break; + } + + nop = create_nop_batch(ce); + if (IS_ERR(nop)) { + err = PTR_ERR(nop); + i915_vma_put(base); + intel_engine_pm_put(engine); + break; + } + + 
err = i915_vma_sync(nop); + if (err) { + i915_vma_put(nop); + i915_vma_put(base); + intel_engine_pm_put(engine); + break; + } + + for (i = 0; i < ARRAY_SIZE(cycles); i++) { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + err = write_timestamp(rq, 2); + if (err) + goto out; + + err = rq->engine->emit_bb_start(rq, + base->node.start, 8, + 0); + if (err) + goto out; + + err = write_timestamp(rq, 3); + if (err) + goto out; + + err = rq->engine->emit_bb_start(rq, + nop->node.start, + nop->node.size, + 0); + if (err) + goto out; + + err = write_timestamp(rq, 4); + if (err) + goto out; + +out: + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -EIO; + i915_request_put(rq); + if (err) + break; + + cycles[i] = + (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) - + (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]); + } + i915_vma_put(nop); + i915_vma_put(base); + intel_engine_pm_put(engine); + if (err) + break; + + pr_info("%s: 16K MI_NOOP cycles: %u\n", + engine->name, trifilter(cycles)); + } + if (perf_end(gt)) + err = -EIO; + + return err; +} + +int intel_engine_cs_perf_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(perf_mi_bb_start), + SUBTEST(perf_mi_noop), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} static int intel_mmio_bases_check(void *arg) { diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c new file mode 100644 index 000000000000..43d4d589749f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -0,0 +1,374 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/sort.h> + +#include "i915_drv.h" + +#include "intel_gt_requests.h" +#include "i915_selftest.h" + +static int timeline_sync(struct intel_timeline *tl) +{ + struct dma_fence *fence; + long timeout; + + fence = i915_active_fence_get(&tl->last_request); + if (!fence) + return 0; + + timeout = dma_fence_wait_timeout(fence, true, HZ / 2); + dma_fence_put(fence); + if (timeout < 0) + return timeout; + + return 0; +} + +static int engine_sync_barrier(struct intel_engine_cs *engine) +{ + return timeline_sync(engine->kernel_context->timeline); +} + +struct pulse { + struct i915_active active; + struct kref kref; +}; + +static int pulse_active(struct i915_active *active) +{ + kref_get(&container_of(active, struct pulse, active)->kref); + return 0; +} + +static void pulse_free(struct kref *kref) +{ + kfree(container_of(kref, struct pulse, kref)); +} + +static void pulse_put(struct pulse *p) +{ + kref_put(&p->kref, pulse_free); +} + +static void pulse_retire(struct i915_active *active) +{ + pulse_put(container_of(active, struct pulse, active)); +} + +static struct pulse *pulse_create(void) +{ + struct pulse *p; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return p; + + kref_init(&p->kref); + i915_active_init(&p->active, pulse_active, pulse_retire); + + return p; +} + +static void pulse_unlock_wait(struct pulse *p) +{ + i915_active_unlock_wait(&p->active); +} + +static int __live_idle_pulse(struct intel_engine_cs *engine, + int (*fn)(struct intel_engine_cs *cs)) +{ + struct pulse *p; + int err; + + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + p = pulse_create(); + if (!p) + return -ENOMEM; + + err = i915_active_acquire(&p->active); + if (err) + goto out; + + err = 
i915_active_acquire_preallocate_barrier(&p->active, engine); + if (err) { + i915_active_release(&p->active); + goto out; + } + + i915_active_acquire_barrier(&p->active); + i915_active_release(&p->active); + + GEM_BUG_ON(i915_active_is_idle(&p->active)); + GEM_BUG_ON(llist_empty(&engine->barrier_tasks)); + + err = fn(engine); + if (err) + goto out; + + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); + + if (engine_sync_barrier(engine)) { + struct drm_printer m = drm_err_printer("pulse"); + + pr_err("%s: no heartbeat pulse?\n", engine->name); + intel_engine_dump(engine, &m, "%s", engine->name); + + err = -ETIME; + goto out; + } + + GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial); + + pulse_unlock_wait(p); /* synchronize with the retirement callback */ + + if (!i915_active_is_idle(&p->active)) { + struct drm_printer m = drm_err_printer("pulse"); + + pr_err("%s: heartbeat pulse did not flush idle tasks\n", + engine->name); + i915_active_print(&p->active, &m); + + err = -EINVAL; + goto out; + } + +out: + pulse_put(p); + return err; +} + +static int live_idle_flush(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that we can flush the idle barriers */ + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_idle_pulse(engine, intel_engine_flush_barriers); + intel_engine_pm_put(engine); + if (err) + break; + } + + return err; +} + +static int live_idle_pulse(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that heartbeat pulses flush the idle barriers */ + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_idle_pulse(engine, intel_engine_pulse); + intel_engine_pm_put(engine); + if (err && err != -ENODEV) + break; + + err = 0; + } + + return err; +} + +static int cmp_u32(const void *_a, const void *_b) +{ + const u32 *a = _a, *b = _b; + + return *a - *b; +} + +static int __live_heartbeat_fast(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct i915_request *rq; + ktime_t t0, t1; + u32 times[5]; + int err; + int i; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + intel_engine_pm_get(engine); + + err = intel_engine_set_heartbeat(engine, 1); + if (err) + goto err_pm; + + for (i = 0; i < ARRAY_SIZE(times); i++) { + /* Manufacture a tick */ + do { + while (READ_ONCE(engine->heartbeat.systole)) + flush_delayed_work(&engine->heartbeat.work); + + engine->serial++; /* quick, pretend we are not idle! */ + flush_delayed_work(&engine->heartbeat.work); + if (!delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat did not start\n", + engine->name); + err = -EINVAL; + goto err_pm; + } + + rcu_read_lock(); + rq = READ_ONCE(engine->heartbeat.systole); + if (rq) + rq = i915_request_get_rcu(rq); + rcu_read_unlock(); + } while (!rq); + + t0 = ktime_get(); + while (rq == READ_ONCE(engine->heartbeat.systole)) + yield(); /* work is on the local cpu! 
*/ + t1 = ktime_get(); + + i915_request_put(rq); + times[i] = ktime_us_delta(t1, t0); + } + + sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL); + + pr_info("%s: Heartbeat delay: %uus [%u, %u]\n", + engine->name, + times[ARRAY_SIZE(times) / 2], + times[0], + times[ARRAY_SIZE(times) - 1]); + + /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */ + if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) { + pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n", + engine->name, + times[ARRAY_SIZE(times) / 2], + jiffies_to_usecs(6)); + err = -EINVAL; + } + + intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL); +err_pm: + intel_engine_pm_put(engine); + intel_context_put(ce); + return err; +} + +static int live_heartbeat_fast(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that the heartbeat ticks at the desired rate. */ + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) + return 0; + + for_each_engine(engine, gt, id) { + err = __live_heartbeat_fast(engine); + if (err) + break; + } + + return err; +} + +static int __live_heartbeat_off(struct intel_engine_cs *engine) +{ + int err; + + intel_engine_pm_get(engine); + + engine->serial++; + flush_delayed_work(&engine->heartbeat.work); + if (!delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat not running\n", + engine->name); + err = -EINVAL; + goto err_pm; + } + + err = intel_engine_set_heartbeat(engine, 0); + if (err) + goto err_pm; + + engine->serial++; + flush_delayed_work(&engine->heartbeat.work); + if (delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat still running\n", + engine->name); + err = -EINVAL; + goto err_beat; + } + + if (READ_ONCE(engine->heartbeat.systole)) { + pr_err("%s: heartbeat still allocated\n", + engine->name); + err = -EINVAL; + goto err_beat; + } + +err_beat: + intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL); +err_pm: + intel_engine_pm_put(engine); + return err; +} + +static int live_heartbeat_off(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that we can turn off heartbeat and not interrupt VIP */ + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) + return 0; + + for_each_engine(engine, gt, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + err = __live_heartbeat_off(engine); + if (err) + break; + } + + return err; +} + +int intel_heartbeat_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_idle_flush), + SUBTEST(live_idle_pulse), + SUBTEST(live_heartbeat_fast), + SUBTEST(live_heartbeat_off), + }; + int saved_hangcheck; + int err; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + saved_hangcheck = i915_modparams.enable_hangcheck; + i915_modparams.enable_hangcheck = INT_MAX; + + err = intel_gt_live_subtests(tests, &i915->gt); + + i915_modparams.enable_hangcheck = saved_hangcheck; + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 20b9c83f43ad..cbf6b0735272 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -51,11 +51,12 @@ static int live_engine_pm(void *arg) pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n", engine->name, p->name); else - intel_engine_pm_put(engine); - intel_engine_pm_put(engine); + 
intel_engine_pm_put_async(engine); + intel_engine_pm_put_async(engine); p->critical_section_end(); - /* engine wakeref is sync (instant) */ + intel_engine_pm_flush(engine); + if (intel_engine_pm_is_awake(engine)) { pr_err("%s is still awake after flushing pm\n", engine->name); diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 5d429037cdad..09ff8e4f88af 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -6,6 +6,7 @@ */ #include "selftest_llc.h" +#include "selftest_rc6.h" static int live_gt_resume(void *arg) { @@ -15,7 +16,8 @@ /* Do several suspend/resume cycles to check we don't explode! */ do { - intel_gt_suspend(gt); + intel_gt_suspend_prepare(gt); + intel_gt_suspend_late(gt); if (gt->rc6.enabled) { pr_err("rc6 still enabled after suspend!\n"); @@ -49,6 +51,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(live_rc6_manual), SUBTEST(live_gt_resume), }; @@ -57,3 +60,20 @@ return intel_gt_live_subtests(tests, &i915->gt); } + +int intel_gt_pm_late_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + /* + * These tests may leave the system in an undesirable state. + * They are intended to be run last in CI and the system + * rebooted afterwards. + */ + SUBTEST(live_rc6_ctx_wa), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 8e0016464325..5dbda2a74272 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -25,7 +25,9 @@ #include <linux/kthread.h> #include "gem/i915_gem_context.h" -#include "gt/intel_gt.h" + +#include "intel_gt.h" +#include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "i915_selftest.h" @@ -308,6 +310,24 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq) 1000)); } +static void engine_heartbeat_disable(struct intel_engine_cs *engine, + unsigned long *saved) +{ + *saved = engine->props.heartbeat_interval_ms; + engine->props.heartbeat_interval_ms = 0; + + intel_engine_pm_get(engine); + intel_engine_park_heartbeat(engine); +} + +static void engine_heartbeat_enable(struct intel_engine_cs *engine, + unsigned long saved) +{ + intel_engine_pm_put(engine); + + engine->props.heartbeat_interval_ms = saved; +} + static int igt_hang_sanitycheck(void *arg) { struct intel_gt *gt = arg; @@ -377,36 +397,30 @@ static int igt_reset_nop(void *arg) struct intel_gt *gt = arg; struct i915_gpu_error *global = &gt->i915->gpu_error; struct intel_engine_cs *engine; - struct i915_gem_context *ctx; unsigned int reset_count, count; enum intel_engine_id id; - struct drm_file *file; IGT_TIMEOUT(end_time); int err = 0; /* Check that we can reset during non-user portions of requests */ - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(gt->i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); reset_count = i915_reset_count(global); count = 0; do { for_each_engine(engine, gt, id) { + struct intel_context *ce; int i; + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = 
PTR_ERR(ce); + break; + } + for (i = 0; i < 16; i++) { struct i915_request *rq; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); break; @@ -414,6 +428,8 @@ static int igt_reset_nop(void *arg) i915_request_add(rq); } + + intel_context_put(ce); } igt_global_reset_lock(gt); @@ -437,10 +453,7 @@ static int igt_reset_nop(void *arg) } while (time_before(jiffies, end_time)); pr_info("%s: %d resets\n", __func__, count); - err = igt_flush_test(gt->i915); -out: - mock_file_free(gt->i915, file); - if (intel_gt_is_wedged(gt)) + if (igt_flush_test(gt->i915)) err = -EIO; return err; } @@ -450,36 +463,29 @@ static int igt_reset_nop_engine(void *arg) struct intel_gt *gt = arg; struct i915_gpu_error *global = &gt->i915->gpu_error; struct intel_engine_cs *engine; - struct i915_gem_context *ctx; enum intel_engine_id id; - struct drm_file *file; - int err = 0; /* Check that we can engine-reset during non-user portions */ if (!intel_has_reset_engine(gt)) return 0; - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(gt->i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); for_each_engine(engine, gt, id) { - unsigned int reset_count, reset_engine_count; - unsigned int count; + unsigned int reset_count, reset_engine_count, count; + struct intel_context *ce; + unsigned long heartbeat; IGT_TIMEOUT(end_time); + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); reset_count = i915_reset_count(global); reset_engine_count = i915_reset_engine_count(global, engine); count = 0; + engine_heartbeat_disable(engine, &heartbeat); set_bit(I915_RESET_ENGINE + id, &gt->reset.flags); do { int i; @@ -494,7 +500,7 @@ static int igt_reset_nop_engine(void *arg) for (i = 0; i < 16; i++) { struct i915_request *rq; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); break; @@ -523,22 +529,18 @@ static int igt_reset_nop_engine(void *arg) } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); - pr_info("%s(%s): %d resets\n", __func__, engine->name, count); + engine_heartbeat_enable(engine, heartbeat); - if (err) - break; + pr_info("%s(%s): %d resets\n", __func__, engine->name, count); - err = igt_flush_test(gt->i915); + intel_context_put(ce); + if (igt_flush_test(gt->i915)) + err = -EIO; if (err) - break; + return err; } - err = igt_flush_test(gt->i915); -out: - mock_file_free(gt->i915, file); - if (intel_gt_is_wedged(gt)) - err = -EIO; - return err; + return 0; } static int __igt_reset_engine(struct intel_gt *gt, bool active) @@ -562,6 +564,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) for_each_engine(engine, gt, id) { unsigned int reset_count, reset_engine_count; + unsigned long heartbeat; IGT_TIMEOUT(end_time); if (active && !intel_engine_can_store_dword(engine)) @@ -577,7 +580,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) reset_count = i915_reset_count(global); reset_engine_count = i915_reset_engine_count(global, engine); - intel_engine_pm_get(engine); + engine_heartbeat_disable(engine, &heartbeat); set_bit(I915_RESET_ENGINE + id, &gt->reset.flags); do { if (active) { @@ -629,7 +632,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); - intel_engine_pm_put(engine); + 
engine_heartbeat_enable(engine, heartbeat); if (err) break; @@ -699,43 +702,43 @@ static int active_engine(void *data) struct active_engine *arg = data; struct intel_engine_cs *engine = arg->engine; struct i915_request *rq[8] = {}; - struct i915_gem_context *ctx[ARRAY_SIZE(rq)]; - struct drm_file *file; - unsigned long count = 0; + struct intel_context *ce[ARRAY_SIZE(rq)]; + unsigned long count; int err = 0; - file = mock_file(engine->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - for (count = 0; count < ARRAY_SIZE(ctx); count++) { - ctx[count] = live_context(engine->i915, file); - if (IS_ERR(ctx[count])) { - err = PTR_ERR(ctx[count]); + for (count = 0; count < ARRAY_SIZE(ce); count++) { + ce[count] = intel_context_create(engine); + if (IS_ERR(ce[count])) { + err = PTR_ERR(ce[count]); while (--count) - i915_gem_context_put(ctx[count]); - goto err_file; + intel_context_put(ce[count]); + return err; } } + count = 0; while (!kthread_should_stop()) { unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1); struct i915_request *old = rq[idx]; struct i915_request *new; - new = igt_request_alloc(ctx[idx], engine); + new = intel_context_create_request(ce[idx]); if (IS_ERR(new)) { err = PTR_ERR(new); break; } - if (arg->flags & TEST_PRIORITY) - ctx[idx]->sched.priority = - i915_prandom_u32_max_state(512, &prng); - rq[idx] = i915_request_get(new); i915_request_add(new); + if (engine->schedule && arg->flags & TEST_PRIORITY) { + struct i915_sched_attr attr = { + .priority = + i915_prandom_u32_max_state(512, &prng), + }; + engine->schedule(rq[idx], &attr); + } + err = active_request_put(old); if (err) break; @@ -749,10 +752,10 @@ static int active_engine(void *data) /* Keep the first error */ if (!err) err = err__; + + intel_context_put(ce[count]); } -err_file: - mock_file_free(engine->i915, file); return err; } @@ -786,6 +789,7 @@ static int __igt_reset_engines(struct intel_gt *gt, struct active_engine threads[I915_NUM_ENGINES] = {}; unsigned long device = i915_reset_count(global); unsigned long count = 0, reported; + unsigned long heartbeat; IGT_TIMEOUT(end_time); if (flags & TEST_ACTIVE && @@ -826,7 +830,9 @@ static int __igt_reset_engines(struct intel_gt *gt, get_task_struct(tsk); } - intel_engine_pm_get(engine); + yield(); /* start all threads before we begin */ + + engine_heartbeat_disable(engine, &heartbeat); set_bit(I915_RESET_ENGINE + id, &gt->reset.flags); do { struct i915_request *rq = NULL; @@ -900,7 +906,8 @@ static int __igt_reset_engines(struct intel_gt *gt, } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); - intel_engine_pm_put(engine); + engine_heartbeat_enable(engine, heartbeat); + pr_info("i915_reset_engine(%s:%s): %lu resets\n", engine->name, test_name, count); @@ -1016,7 +1023,7 @@ static int igt_reset_wait(void *arg) { struct intel_gt *gt = arg; struct i915_gpu_error *global = &gt->i915->gpu_error; - struct intel_engine_cs *engine = gt->i915->engine[RCS0]; + struct intel_engine_cs *engine = gt->engine[RCS0]; struct i915_request *rq; unsigned int reset_count; struct hang h; @@ -1143,14 +1150,18 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, int (*fn)(void *), unsigned int flags) { - struct intel_engine_cs *engine = gt->i915->engine[RCS0]; + struct intel_engine_cs *engine = gt->engine[RCS0]; struct drm_i915_gem_object *obj; struct task_struct *tsk = NULL; struct i915_request *rq; struct evict_vma arg; struct hang h; + unsigned int pin_flags; int err; + if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE) + return 0; + if 
(!engine || !intel_engine_can_store_dword(engine)) return 0; @@ -1186,10 +1197,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, goto out_obj; } - err = i915_vma_pin(arg.vma, 0, 0, - i915_vma_is_ggtt(arg.vma) ? - PIN_GLOBAL | PIN_MAPPABLE : - PIN_USER); + pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER; + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + pin_flags |= PIN_MAPPABLE; + + err = i915_vma_pin(arg.vma, 0, 0, pin_flags); if (err) { i915_request_add(rq); goto out_obj; @@ -1292,32 +1305,21 @@ static int igt_reset_evict_ggtt(void *arg) static int igt_reset_evict_ppgtt(void *arg) { struct intel_gt *gt = arg; - struct i915_gem_context *ctx; - struct i915_address_space *vm; - struct drm_file *file; + struct i915_ppgtt *ppgtt; int err; - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); + /* aliasing == global gtt locking, covered above */ + if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL) + return 0; - ctx = live_context(gt->i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } + ppgtt = i915_ppgtt_create(gt->i915); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); - err = 0; - vm = i915_gem_context_get_vm_rcu(ctx); - if (!i915_is_ggtt(vm)) { - /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(gt, vm, - evict_vma, EXEC_OBJECT_WRITE); - } - i915_vm_put(vm); + err = __igt_reset_evict_vma(gt, &ppgtt->vm, + evict_vma, EXEC_OBJECT_WRITE); + i915_vm_put(&ppgtt->vm); -out: - mock_file_free(gt->i915, file); return err; } @@ -1493,7 +1495,7 @@ static int igt_handle_error(void *arg) { struct intel_gt *gt = arg; struct i915_gpu_error *global = &gt->i915->gpu_error; - struct intel_engine_cs *engine = gt->i915->engine[RCS0]; + struct intel_engine_cs *engine = gt->engine[RCS0]; struct hang h; struct i915_request *rq; struct i915_gpu_state *error; @@ -1563,7 +1565,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", engine->name, mode, p->name); - tasklet_disable_nosync(t); + tasklet_disable(t); p->critical_section_begin(); err = intel_engine_reset(engine, NULL); @@ -1686,7 +1688,6 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) }; struct intel_gt *gt = &i915->gt; intel_wakeref_t wakeref; - bool saved_hangcheck; int err; if (!intel_has_gpu_reset(gt)) @@ -1696,12 +1697,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) return -EIO; /* we're long past hope of a successful reset */ wakeref = intel_runtime_pm_get(gt->uncore->rpm); - saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); - drain_delayed_work(&gt->hangcheck.work); /* flush param */ err = intel_gt_live_subtests(tests, gt); - i915_modparams.enable_hangcheck = saved_hangcheck; intel_runtime_pm_put(gt->uncore->rpm, wakeref); return err; diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index a7057785e420..fd3770e48ac7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -6,6 +6,7 @@ #include "intel_pm.h" /* intel_gpu_freq() */ #include "selftest_llc.h" +#include "intel_rps.h" static int gen6_verify_ring_freq(struct intel_llc *llc) { @@ -25,6 +26,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) for (gpu_freq = consts.min_gpu_freq; gpu_freq <= consts.max_gpu_freq; gpu_freq++) { + struct intel_rps *rps = &llc_to_gt(llc)->rps; + unsigned int ia_freq, ring_freq, found; u32 val; @@ -44,7 +47,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) if 
(found != ia_freq) { pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, - intel_gpu_freq(i915, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), + intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), found, ia_freq); err = -EINVAL; break; @@ -54,7 +57,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) if (found != ring_freq) { pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, - intel_gpu_freq(i915, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), + intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), found, ring_freq); err = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 5dc679781a08..9ec9833c9c7b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_engine_heartbeat.h" #include "gt/intel_reset.h" #include "i915_selftest.h" @@ -49,14 +50,31 @@ static struct i915_vma *create_scratch(struct intel_gt *gt) return vma; } +static void engine_heartbeat_disable(struct intel_engine_cs *engine, + unsigned long *saved) +{ + *saved = engine->props.heartbeat_interval_ms; + engine->props.heartbeat_interval_ms = 0; + + intel_engine_pm_get(engine); + intel_engine_park_heartbeat(engine); +} + +static void engine_heartbeat_enable(struct intel_engine_cs *engine, + unsigned long saved) +{ + intel_engine_pm_put(engine); + + engine->props.heartbeat_interval_ms = saved; +} + static int live_sanitycheck(void *arg) { struct intel_gt *gt = arg; - struct i915_gem_engines_iter it; - struct i915_gem_context *ctx; - struct intel_context *ce; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct igt_spinner spin; - int err = -ENOMEM; + int err = 0; if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) return 0; @@ -64,17 +82,20 @@ static int live_sanitycheck(void *arg) if (igt_spinner_init(&spin, gt)) return -ENOMEM; - ctx = kernel_context(gt->i915); - if (!ctx) - goto err_spin; - - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + for_each_engine(engine, gt, id) { + struct intel_context *ce; struct i915_request *rq; + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_ctx; + goto out_ctx; } i915_request_add(rq); @@ -83,21 +104,21 @@ static int live_sanitycheck(void *arg) GEM_TRACE_DUMP(); intel_gt_set_wedged(gt); err = -EIO; - goto err_ctx; + goto out_ctx; } igt_spinner_end(&spin); if (igt_flush_test(gt->i915)) { err = -EIO; - goto err_ctx; + goto out_ctx; } + +out_ctx: + intel_context_put(ce); + if (err) + break; } - err = 0; -err_ctx: - i915_gem_context_unlock_engines(ctx); - kernel_context_close(ctx); -err_spin: igt_spinner_fini(&spin); return err; } @@ -105,7 +126,6 @@ err_spin: static int live_unlite_restore(struct intel_gt *gt, int prio) { struct intel_engine_cs *engine; - struct i915_gem_context *ctx; enum intel_engine_id id; struct igt_spinner spin; int err = -ENOMEM; @@ -118,15 +138,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) if (igt_spinner_init(&spin, gt)) return err; - ctx = kernel_context(gt->i915); - if (!ctx) - goto err_spin; - err = 0; 
for_each_engine(engine, gt, id) { struct intel_context *ce[2] = {}; struct i915_request *rq[2]; struct igt_live_test t; + unsigned long saved; int n; if (prio && !intel_engine_has_preemption(engine)) @@ -139,11 +156,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) err = -EIO; break; } + engine_heartbeat_disable(engine, &saved); for (n = 0; n < ARRAY_SIZE(ce); n++) { struct intel_context *tmp; - tmp = intel_context_create(ctx, engine); + tmp = intel_context_create(engine); if (IS_ERR(tmp)) { err = PTR_ERR(tmp); goto err_ce; @@ -168,12 +186,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) } GEM_BUG_ON(!ce[1]->ring->size); intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); - - local_irq_disable(); /* appease lockdep */ - __context_pin_acquire(ce[1]); __execlists_update_reg_state(ce[1], engine); - __context_pin_release(ce[1]); - local_irq_enable(); rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); if (IS_ERR(rq[0])) { @@ -251,14 +264,13 @@ err_ce: intel_context_put(ce[n]); } + engine_heartbeat_enable(engine, saved); if (igt_live_test_end(&t)) err = -EIO; if (err) break; } - kernel_context_close(ctx); -err_spin: igt_spinner_fini(&spin); return err; } @@ -313,17 +325,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) static struct i915_request * semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) { - struct i915_gem_context *ctx; + struct intel_context *ce; struct i915_request *rq; int err; - ctx = kernel_context(engine->i915); - if (!ctx) - return ERR_PTR(-ENOMEM); + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) - goto out_ctx; + goto out_ce; err = 0; if (rq->engine->emit_init_breadcrumb) @@ -336,8 +348,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) if (err) rq = ERR_PTR(err); -out_ctx: - kernel_context_close(ctx); +out_ce: + intel_context_put(ce); return rq; } @@ -352,7 +364,7 @@ release_queue(struct intel_engine_cs *engine, struct i915_request *rq; u32 *cs; - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -444,6 +456,8 @@ static int live_timeslice_preempt(void *arg) * need to preempt the current task and replace it with another * ready task. 
*/ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(obj)) @@ -470,12 +484,16 @@ static int live_timeslice_preempt(void *arg) enum intel_engine_id id; for_each_engine(engine, gt, id) { + unsigned long saved; + if (!intel_engine_has_preemption(engine)) continue; memset(vaddr, 0, PAGE_SIZE); + engine_heartbeat_disable(engine, &saved); err = slice_semaphore_queue(engine, vma, count); + engine_heartbeat_enable(engine, saved); if (err) goto err_pin; @@ -499,7 +517,7 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine) { struct i915_request *rq; - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) return rq; @@ -518,6 +536,11 @@ static void wait_for_submit(struct intel_engine_cs *engine, } while (!i915_request_is_active(rq)); } +static long timeslice_threshold(const struct intel_engine_cs *engine) +{ + return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1; +} + static int live_timeslice_queue(void *arg) { struct intel_gt *gt = arg; @@ -535,6 +558,8 @@ static int live_timeslice_queue(void *arg) * ELSP[1] is already occupied, so must rely on timeslicing to * eject ELSP[0] in favour of the queue.) */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(obj)) @@ -561,17 +586,19 @@ static int live_timeslice_queue(void *arg) .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), }; struct i915_request *rq, *nop; + unsigned long saved; if (!intel_engine_has_preemption(engine)) continue; + engine_heartbeat_disable(engine, &saved); memset(vaddr, 0, PAGE_SIZE); /* ELSP[0]: semaphore wait */ rq = semaphore_queue(engine, vma, 0); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_pin; + goto err_heartbeat; } engine->schedule(rq, &attr); wait_for_submit(engine, rq); @@ -580,8 +607,7 @@ static int live_timeslice_queue(void *arg) nop = nop_request(engine); if (IS_ERR(nop)) { err = PTR_ERR(nop); - i915_request_put(rq); - goto err_pin; + goto err_rq; } wait_for_submit(engine, nop); i915_request_put(nop); @@ -591,10 +617,8 @@ static int live_timeslice_queue(void *arg) /* Queue: semaphore signal, matching priority as semaphore */ err = release_queue(engine, vma, 1, effective_prio(rq)); - if (err) { - i915_request_put(rq); - goto err_pin; - } + if (err) + goto err_rq; intel_engine_flush_submission(engine); if (!READ_ONCE(engine->execlists.timer.expires) && @@ -612,8 +636,8 @@ static int live_timeslice_queue(void *arg) err = -EINVAL; } - /* Timeslice every jiffie, so within 2 we should signal */ - if (i915_request_wait(rq, 0, 3) < 0) { + /* Timeslice every jiffy, so within 2 we should signal */ + if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); @@ -625,12 +649,14 @@ static int live_timeslice_queue(void *arg) memset(vaddr, 0xff, PAGE_SIZE); err = -EIO; } +err_rq: i915_request_put(rq); +err_heartbeat: + engine_heartbeat_enable(engine, saved); if (err) break; } -err_pin: i915_vma_unpin(vma); err_map: i915_gem_object_unpin_map(obj); @@ -743,15 +769,19 @@ static int live_busywait_preempt(void *arg) *cs++ = 0; intel_ring_advance(lo, cs); + + i915_request_get(lo); i915_request_add(lo); if (wait_for(READ_ONCE(*map), 10)) { + i915_request_put(lo); err = -ETIMEDOUT; goto err_vma; } /* Low priority request should be busywaiting now */ if (i915_request_wait(lo, 0, 1) != -ETIME) { + 
i915_request_put(lo); pr_err("%s: Busywaiting request did not!\n", engine->name); err = -EIO; @@ -761,6 +791,7 @@ static int live_busywait_preempt(void *arg) hi = igt_request_alloc(ctx_hi, engine); if (IS_ERR(hi)) { err = PTR_ERR(hi); + i915_request_put(lo); goto err_vma; } @@ -768,6 +799,7 @@ static int live_busywait_preempt(void *arg) if (IS_ERR(cs)) { err = PTR_ERR(cs); i915_request_add(hi); + i915_request_put(lo); goto err_vma; } @@ -788,11 +820,13 @@ static int live_busywait_preempt(void *arg) intel_engine_dump(engine, &p, "%s\n", engine->name); GEM_TRACE_DUMP(); + i915_request_put(lo); intel_gt_set_wedged(gt); err = -EIO; goto err_vma; } GEM_BUG_ON(READ_ONCE(*map)); + i915_request_put(lo); if (igt_live_test_end(&t)) { err = -EIO; @@ -1165,6 +1199,325 @@ err_wedged: goto err_client_b; } +struct live_preempt_cancel { + struct intel_engine_cs *engine; + struct preempt_client a, b; +}; + +static int __cancel_active0(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP0 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + clear_bit(CONTEXT_BANNED, &rq->context->flags); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + intel_context_set_banned(rq->context); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_active1(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[2] = {}; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP1 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* no preemption */ + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + rq[1] = spinner_create_request(&arg->b.spin, + arg->b.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + intel_context_set_banned(rq[1]->context); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + igt_spinner_end(&arg->a.spin); + if (i915_request_wait(rq[1], 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq[0]->fence.error != 0) { + pr_err("Normal inflight0 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != -EIO) { + pr_err("Cancelled inflight1 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} 
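/*
 * A minimal distillation of the ban-and-pulse idiom shared by the
 * __cancel_*() subtests above -- an illustrative sketch only, not part
 * of the patch. The helper name is hypothetical; the calls match the
 * APIs already used in this file. The pattern: submit a spinning
 * request, mark its context banned, then send a heartbeat pulse so the
 * scheduler preempts the banned context off the engine and cancels the
 * request with -EIO.
 */
static int ban_and_pulse(struct igt_spinner *spin,
			 struct i915_gem_context *ctx,
			 struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err = 0;

	rq = spinner_create_request(spin, ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);
	if (!igt_wait_for_spinner(spin, rq)) {
		err = -EIO; /* spinner never started */
		goto out;
	}

	/* Ban the context, then kick the engine to force the cancel */
	intel_context_set_banned(rq->context);
	err = intel_engine_pulse(engine);
	if (err)
		goto out;

	/* The cancelled request should complete promptly with -EIO */
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	else if (rq->fence.error != -EIO)
		err = -EINVAL;
out:
	i915_request_put(rq);
	return err;
}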
+ +static int __cancel_queued(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[3] = {}; + struct igt_live_test t; + int err; + + /* Full ELSP and one in the wings */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + rq[2] = spinner_create_request(&arg->b.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[2])) { + err = PTR_ERR(rq[2]); + goto out; + } + + i915_request_get(rq[2]); + err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); + i915_request_add(rq[2]); + if (err) + goto out; + + intel_context_set_banned(rq[2]->context); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + if (i915_request_wait(rq[2], 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq[0]->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != 0) { + pr_err("Normal inflight1 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[2]->fence.error != -EIO) { + pr_err("Cancelled queued request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[2]); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_hostile(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + int err; + + /* Preempt cancel non-preemptible spinner in ELSP0 */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) + return PTR_ERR(rq); + + clear_bit(CONTEXT_BANNED, &rq->context->flags); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + intel_context_set_banned(rq->context); + err = intel_engine_pulse(arg->engine); /* force reset */ + if (err) + goto out; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq); + if (igt_flush_test(arg->engine->i915)) + err = -EIO; + return err; +} + +static int live_preempt_cancel(void *arg) +{ + struct intel_gt *gt = arg; + struct live_preempt_cancel data; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * To cancel an inflight context, we need to first remove it from the + * GPU. That sounds like preemption! Plus a little bit of bookkeeping. 
+ */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (preempt_client_init(gt, &data.a)) + return -ENOMEM; + if (preempt_client_init(gt, &data.b)) + goto err_client_a; + + for_each_engine(data.engine, gt, id) { + if (!intel_engine_has_preemption(data.engine)) + continue; + + err = __cancel_active0(&data); + if (err) + goto err_wedged; + + err = __cancel_active1(&data); + if (err) + goto err_wedged; + + err = __cancel_queued(&data); + if (err) + goto err_wedged; + + err = __cancel_hostile(&data); + if (err) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&data.b); +err_client_a: + preempt_client_fini(&data.a); + return err; + +err_wedged: + GEM_TRACE_DUMP(); + igt_spinner_end(&data.b.spin); + igt_spinner_end(&data.a.spin); + intel_gt_set_wedged(gt); + goto err_client_b; +} + static int live_suppress_self_preempt(void *arg) { struct intel_gt *gt = arg; @@ -1341,6 +1694,7 @@ static int live_suppress_wait_preempt(void *arg) { struct intel_gt *gt = arg; struct preempt_client client[4]; + struct i915_request *rq[ARRAY_SIZE(client)] = {}; struct intel_engine_cs *engine; enum intel_engine_id id; int err = -ENOMEM; @@ -1374,7 +1728,6 @@ static int live_suppress_wait_preempt(void *arg) continue; for (depth = 0; depth < ARRAY_SIZE(client); depth++) { - struct i915_request *rq[ARRAY_SIZE(client)]; struct i915_request *dummy; engine->execlists.preempt_hang.count = 0; @@ -1384,18 +1737,22 @@ static int live_suppress_wait_preempt(void *arg) goto err_client_3; for (i = 0; i < ARRAY_SIZE(client); i++) { - rq[i] = spinner_create_request(&client[i].spin, - client[i].ctx, engine, - MI_NOOP); - if (IS_ERR(rq[i])) { - err = PTR_ERR(rq[i]); + struct i915_request *this; + + this = spinner_create_request(&client[i].spin, + client[i].ctx, engine, + MI_NOOP); + if (IS_ERR(this)) { + err = PTR_ERR(this); goto err_wedged; } /* Disable NEWCLIENT promotion */ - __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request, + __i915_active_fence_set(&i915_request_timeline(this)->last_request, &dummy->fence); - i915_request_add(rq[i]); + + rq[i] = i915_request_get(this); + i915_request_add(this); } dummy_request_free(dummy); @@ -1416,8 +1773,11 @@ static int live_suppress_wait_preempt(void *arg) goto err_wedged; } - for (i = 0; i < ARRAY_SIZE(client); i++) + for (i = 0; i < ARRAY_SIZE(client); i++) { igt_spinner_end(&client[i].spin); + i915_request_put(rq[i]); + rq[i] = NULL; + } if (igt_flush_test(gt->i915)) goto err_wedged; @@ -1445,8 +1805,10 @@ err_client_0: return err; err_wedged: - for (i = 0; i < ARRAY_SIZE(client); i++) + for (i = 0; i < ARRAY_SIZE(client); i++) { igt_spinner_end(&client[i].spin); + i915_request_put(rq[i]); + } intel_gt_set_wedged(gt); err = -EIO; goto err_client_3; @@ -1491,6 +1853,8 @@ static int live_chain_preempt(void *arg) MI_ARB_CHECK); if (IS_ERR(rq)) goto err_wedged; + + i915_request_get(rq); i915_request_add(rq); ring_size = rq->wa_tail - rq->head; @@ -1503,8 +1867,10 @@ static int live_chain_preempt(void *arg) igt_spinner_end(&lo.spin); if (i915_request_wait(rq, 0, HZ / 2) < 0) { pr_err("Timed out waiting to flush %s\n", engine->name); + i915_request_put(rq); goto err_wedged; } + i915_request_put(rq); if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; @@ -1538,6 +1904,8 @@ static int live_chain_preempt(void *arg) rq = igt_request_alloc(hi.ctx, engine); if (IS_ERR(rq)) goto err_wedged; + + i915_request_get(rq); i915_request_add(rq); engine->schedule(rq, &attr); @@ -1550,14 +1918,19 @@ static int 
live_chain_preempt(void *arg) count); intel_engine_dump(engine, &p, "%s\n", engine->name); + i915_request_put(rq); goto err_wedged; } igt_spinner_end(&lo.spin); + i915_request_put(rq); rq = igt_request_alloc(lo.ctx, engine); if (IS_ERR(rq)) goto err_wedged; + + i915_request_get(rq); i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); @@ -1566,8 +1939,11 @@ static int live_chain_preempt(void *arg) count); intel_engine_dump(engine, &p, "%s\n", engine->name); + + i915_request_put(rq); goto err_wedged; } + i915_request_put(rq); } if (igt_live_test_end(&t)) { @@ -1591,6 +1967,201 @@ err_wedged: goto err_client_lo; } +static int create_gang(struct intel_engine_cs *engine, + struct i915_request **prev) +{ + struct drm_i915_gem_object *obj; + struct intel_context *ce; + struct i915_request *rq; + struct i915_vma *vma; + u32 *cs; + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + obj = i915_gem_object_create_internal(engine->i915, 4096); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_ce; + } + + vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + cs = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(cs)) + goto err_obj; + + /* Semaphore target: spin until zero */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = lower_32_bits(vma->node.start); + *cs++ = upper_32_bits(vma->node.start); + + if (*prev) { + u64 offset = (*prev)->batch->node.start; + + /* Terminate the spinner in the next lower priority batch. */ + *cs++ = MI_STORE_DWORD_IMM_GEN4; + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = 0; + } + + *cs++ = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + goto err_obj; + + rq->batch = vma; + i915_request_get(rq); + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + vma->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(vma); + i915_request_add(rq); + if (err) + goto err_rq; + + i915_gem_object_put(obj); + intel_context_put(ce); + + rq->client_link.next = &(*prev)->client_link; + *prev = rq; + return 0; + +err_rq: + i915_request_put(rq); +err_obj: + i915_gem_object_put(obj); +err_ce: + intel_context_put(ce); + return err; +} + +static int live_preempt_gang(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + /* + * Build as long a chain of preempters as we can, with each + * request higher priority than the last. Once we are ready, we release + * the last batch which then percolates down the chain, each releasing + * the next oldest in turn. The intent is to simply push as hard as we + * can with the number of preemptions, trying to exceed narrow HW + * limits. At a minimum, we insist that we can sort all the user + * high priority levels into execution order.
+ */ + + for_each_engine(engine, gt, id) { + struct i915_request *rq = NULL; + struct igt_live_test t; + IGT_TIMEOUT(end_time); + int prio = 0; + int err = 0; + u32 *cs; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) + return -EIO; + + do { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(prio++), + }; + + err = create_gang(engine, &rq); + if (err) + break; + + /* Submit each spinner at increasing priority */ + engine->schedule(rq, &attr); + + if (prio <= I915_PRIORITY_MAX) + continue; + + if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT)) + break; + + if (__igt_timeout(end_time, NULL)) + break; + } while (1); + pr_debug("%s: Preempt chain of %d requests\n", + engine->name, prio); + + /* + * Such that the last spinner is the highest priority and + * should execute first. When that spinner completes, + * it will terminate the next lowest spinner until there + * are no more spinners and the gang is complete. + */ + cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC); + if (!IS_ERR(cs)) { + *cs = 0; + i915_gem_object_unpin_map(rq->batch->obj); + } else { + err = PTR_ERR(cs); + intel_gt_set_wedged(gt); + } + + while (rq) { /* wait for each rq from highest to lowest prio */ + struct i915_request *n = + list_next_entry(rq, client_link); + + if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(engine->i915->drm.dev); + + pr_err("Failed to flush chain of %d requests, at %d\n", + prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -ETIME; + } + + i915_request_put(rq); + rq = n; + } + + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + static int live_preempt_hang(void *arg) { struct intel_gt *gt = arg; @@ -1702,6 +2273,105 @@ err_spin_hi: return err; } +static int live_preempt_timeout(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Check that we force preemption to occur by cancelling the previous + * context if it refuses to yield the GPU. 
+ */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (!intel_has_reset_engine(gt)) + return 0; + + if (igt_spinner_init(&spin_lo, gt)) + return -ENOMEM; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + for_each_engine(engine, gt, id) { + unsigned long saved_timeout; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = igt_request_alloc(ctx_hi, engine); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + /* Flush the previous CS ack before changing timeouts */ + while (READ_ONCE(engine->execlists.pending[0])) + cpu_relax(); + + saved_timeout = engine->props.preempt_timeout_ms; + engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ + + i915_request_get(rq); + i915_request_add(rq); + + intel_engine_flush_submission(engine); + engine->props.preempt_timeout_ms = saved_timeout; + + if (i915_request_wait(rq, 0, HZ / 10) < 0) { + intel_gt_set_wedged(gt); + i915_request_put(rq); + err = -ETIME; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_lo); + i915_request_put(rq); + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); + return err; +} + static int random_range(struct rnd_state *rnd, int min, int max) { return i915_prandom_u32_max_state(max - min, rnd) + min; @@ -1829,6 +2499,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) get_task_struct(tsk[id]); } + yield(); /* start all threads before we kthread_stop() */ + count = 0; for_each_engine(engine, smoke->gt, id) { int status; @@ -1966,28 +2638,18 @@ static int nop_virtual_engine(struct intel_gt *gt, #define CHAIN BIT(0) { IGT_TIMEOUT(end_time); - struct i915_request *request[16]; - struct i915_gem_context *ctx[16]; + struct i915_request *request[16] = {}; struct intel_context *ve[16]; unsigned long n, prime, nc; struct igt_live_test t; ktime_t times[2] = {}; int err; - GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx)); + GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); for (n = 0; n < nctx; n++) { - ctx[n] = kernel_context(gt->i915); - if (!ctx[n]) { - err = -ENOMEM; - nctx = n; - goto out; - } - - ve[n] = intel_execlists_create_virtual(ctx[n], - siblings, nsibling); + ve[n] = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve[n])) { - kernel_context_close(ctx[n]); err = PTR_ERR(ve[n]); nctx = n; goto out; @@ -1996,7 +2658,6 @@ static int nop_virtual_engine(struct intel_gt *gt, err = intel_context_pin(ve[n]); if (err) { intel_context_put(ve[n]); - kernel_context_close(ctx[n]); nctx = n; goto out; } @@ -2012,27 +2673,35 @@ static int nop_virtual_engine(struct intel_gt *gt, if (flags & CHAIN) { for (nc = 0; nc < nctx; nc++) { for (n = 0; n < prime; n++) { - request[nc] = - i915_request_create(ve[nc]); - if (IS_ERR(request[nc])) { - err = PTR_ERR(request[nc]); + struct i915_request *rq; + + rq 
= i915_request_create(ve[nc]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); goto out; } - i915_request_add(request[nc]); + if (request[nc]) + i915_request_put(request[nc]); + request[nc] = i915_request_get(rq); + i915_request_add(rq); } } } else { for (n = 0; n < prime; n++) { for (nc = 0; nc < nctx; nc++) { - request[nc] = - i915_request_create(ve[nc]); - if (IS_ERR(request[nc])) { - err = PTR_ERR(request[nc]); + struct i915_request *rq; + + rq = i915_request_create(ve[nc]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); goto out; } - i915_request_add(request[nc]); + if (request[nc]) + i915_request_put(request[nc]); + request[nc] = i915_request_get(rq); + i915_request_add(rq); } } } @@ -2058,6 +2727,11 @@ static int nop_virtual_engine(struct intel_gt *gt, if (prime == 1) times[0] = times[1]; + for (nc = 0; nc < nctx; nc++) { + i915_request_put(request[nc]); + request[nc] = NULL; + } + if (__igt_timeout(end_time, NULL)) break; } @@ -2075,9 +2749,9 @@ out: err = -EIO; for (nc = 0; nc < nctx; nc++) { + i915_request_put(request[nc]); intel_context_unpin(ve[nc]); intel_context_put(ve[nc]); - kernel_context_close(ctx[nc]); } return err; } @@ -2136,7 +2810,6 @@ static int mask_virtual_engine(struct intel_gt *gt, unsigned int nsibling) { struct i915_request *request[MAX_ENGINE_INSTANCE + 1]; - struct i915_gem_context *ctx; struct intel_context *ve; struct igt_live_test t; unsigned int n; @@ -2147,11 +2820,7 @@ static int mask_virtual_engine(struct intel_gt *gt, * restrict it to our desired engine within the virtual engine. */ - ctx = kernel_context(gt->i915); - if (!ctx) - return -ENOMEM; - - ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_close; @@ -2219,7 +2888,6 @@ out_unpin: out_put: intel_context_put(ve); out_close: - kernel_context_close(ctx); return err; } @@ -2259,7 +2927,6 @@ static int preserved_virtual_engine(struct intel_gt *gt, unsigned int nsibling) { struct i915_request *last = NULL; - struct i915_gem_context *ctx; struct intel_context *ve; struct i915_vma *scratch; struct igt_live_test t; @@ -2267,17 +2934,11 @@ static int preserved_virtual_engine(struct intel_gt *gt, int err = 0; u32 *cs; - ctx = kernel_context(gt->i915); - if (!ctx) - return -ENOMEM; - scratch = create_scratch(siblings[0]->gt); - if (IS_ERR(scratch)) { - err = PTR_ERR(scratch); - goto out_close; - } + if (IS_ERR(scratch)) + return PTR_ERR(scratch); - ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_scratch; @@ -2360,8 +3021,6 @@ out_put: intel_context_put(ve); out_scratch: i915_vma_unpin_and_release(&scratch, 0); -out_close: - kernel_context_close(ctx); return err; } @@ -2413,16 +3072,54 @@ static int bond_virtual_engine(struct intel_gt *gt, #define BOND_SCHEDULE BIT(0) { struct intel_engine_cs *master; - struct i915_gem_context *ctx; struct i915_request *rq[16]; enum intel_engine_id id; + struct igt_spinner spin; unsigned long n; int err; + /* + * A set of bonded requests is intended to be run concurrently + * across a number of engines. We use one request per-engine + * and a magic fence to schedule each of the bonded requests + * at the same time. A consequence of our current scheduler is that + * we only move requests to the HW ready queue when the request + * becomes ready, that is when all of its prerequisite fences have + * been signaled. 
As one of those fences is the master submit fence, + * there is a delay on all secondary fences as the HW may be + * currently busy. Equally, as all the requests are independent, + * they may have other fences that delay individual request + * submission to HW. Ergo, we do not guarantee that all requests are + * immediately submitted to HW at the same time, just that if the + * rules are abided by, they are ready at the same time as the + * first is submitted. Userspace can embed semaphores in its batch + * to ensure parallel execution of its phases as it requires. + * Though naturally it gets requested that perhaps the scheduler should + * take care of parallel execution, even across preemption events on + * different HW. (The proper answer is of course "lalalala".) + * + * With the submit-fence, we have identified three possible phases + * of synchronisation depending on the master fence: queued (not + * ready), executing, and signaled. The first two are quite simple + * and checked below. However, the signaled master fence handling is + * contentious. Currently we do not distinguish between a signaled + * fence and an expired fence, as once signaled it does not convey + * any information about the previous execution. It may even be freed + * and hence checking later it may not exist at all. Ergo we currently + * do not apply the bonding constraint for an already signaled fence, + * as our expectation is that it should not constrain the secondaries + * and is outside of the scope of the bonded request API (i.e. all + * userspace requests are meant to be running in parallel). As + * it imposes no constraint, and is effectively a no-op, we do not + * check below as normal execution flows are checked extensively above. + * + * XXX Is the degenerate handling of signaled submit fences the + * expected behaviour for userspace?
+ */ + GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1); - ctx = kernel_context(gt->i915); - if (!ctx) + if (igt_spinner_init(&spin, gt)) return -ENOMEM; err = 0; @@ -2435,7 +3132,9 @@ static int bond_virtual_engine(struct intel_gt *gt, memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq)); - rq[0] = igt_request_alloc(ctx, master); + rq[0] = igt_spinner_create_request(&spin, + master->kernel_context, + MI_NOOP); if (IS_ERR(rq[0])) { err = PTR_ERR(rq[0]); goto out; @@ -2448,16 +3147,21 @@ static int bond_virtual_engine(struct intel_gt *gt, &fence, GFP_KERNEL); } + i915_request_add(rq[0]); if (err < 0) goto out; + if (!(flags & BOND_SCHEDULE) && + !igt_wait_for_spinner(&spin, rq[0])) { + err = -EIO; + goto out; + } + for (n = 0; n < nsibling; n++) { struct intel_context *ve; - ve = intel_execlists_create_virtual(ctx, - siblings, - nsibling); + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); onstack_fence_fini(&fence); @@ -2499,6 +3203,8 @@ static int bond_virtual_engine(struct intel_gt *gt, } } onstack_fence_fini(&fence); + intel_engine_flush_submission(master); + igt_spinner_end(&spin); if (i915_request_wait(rq[0], 0, HZ / 10) < 0) { pr_err("Master request did not execute (on %s)!\n", @@ -2535,7 +3241,7 @@ out: if (igt_flush_test(gt->i915)) err = -EIO; - kernel_context_close(ctx); + igt_spinner_fini(&spin); return err; } @@ -2599,10 +3305,13 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_preempt), SUBTEST(live_late_preempt), SUBTEST(live_nopreempt), + SUBTEST(live_preempt_cancel), SUBTEST(live_suppress_self_preempt), SUBTEST(live_suppress_wait_preempt), SUBTEST(live_chain_preempt), + SUBTEST(live_preempt_gang), SUBTEST(live_preempt_hang), + SUBTEST(live_preempt_timeout), SUBTEST(live_preempt_smoke), SUBTEST(live_virtual_engine), SUBTEST(live_virtual_mask), @@ -2749,8 +3458,101 @@ static int live_lrc_layout(void *arg) return err; } -static int __live_lrc_state(struct i915_gem_context *fixme, - struct intel_engine_cs *engine, +static int find_offset(const u32 *lri, u32 offset) +{ + int i; + + for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) + if (lri[i] == offset) + return i; + + return -1; +} + +static int live_lrc_fixed(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * Check the assumed register offsets match the actual locations in + * the context image. 
+ */ + + for_each_engine(engine, gt, id) { + const struct { + u32 reg; + u32 offset; + const char *name; + } tbl[] = { + { + i915_mmio_reg_offset(RING_START(engine->mmio_base)), + CTX_RING_START - 1, + "RING_START" + }, + { + i915_mmio_reg_offset(RING_CTL(engine->mmio_base)), + CTX_RING_CTL - 1, + "RING_CTL" + }, + { + i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)), + CTX_RING_HEAD - 1, + "RING_HEAD" + }, + { + i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)), + CTX_RING_TAIL - 1, + "RING_TAIL" + }, + { + i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)), + lrc_ring_mi_mode(engine), + "RING_MI_MODE" + }, + { + i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)), + CTX_BB_STATE - 1, + "BB_STATE" + }, + { }, + }, *t; + u32 *hw; + + if (!engine->default_state) + continue; + + hw = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + break; + } + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + for (t = tbl; t->name; t++) { + int dw = find_offset(hw, t->reg); + + if (dw != t->offset) { + pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n", + engine->name, + t->name, + t->reg, + dw, + t->offset); + err = -EINVAL; + } + } + + i915_gem_object_unpin_map(engine->default_state); + } + + return err; +} + +static int __live_lrc_state(struct intel_engine_cs *engine, struct i915_vma *scratch) { struct intel_context *ce; @@ -2765,7 +3567,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme, int err; int n; - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); @@ -2839,7 +3641,6 @@ static int live_lrc_state(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; struct i915_vma *scratch; enum intel_engine_id id; int err = 0; @@ -2849,18 +3650,12 @@ static int live_lrc_state(void *arg) * intel_context. 
*/ - fixme = kernel_context(gt->i915); - if (!fixme) - return -ENOMEM; - scratch = create_scratch(gt); - if (IS_ERR(scratch)) { - err = PTR_ERR(scratch); - goto out_close; - } + if (IS_ERR(scratch)) + return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - err = __live_lrc_state(fixme, engine, scratch); + err = __live_lrc_state(engine, scratch); if (err) break; } @@ -2869,8 +3664,6 @@ static int live_lrc_state(void *arg) err = -EIO; i915_vma_unpin_and_release(&scratch, 0); -out_close: - kernel_context_close(fixme); return err; } @@ -2880,7 +3673,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine) u32 *cs; int n; - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -2903,8 +3696,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine) return 0; } -static int __live_gpr_clear(struct i915_gem_context *fixme, - struct intel_engine_cs *engine, +static int __live_gpr_clear(struct intel_engine_cs *engine, struct i915_vma *scratch) { struct intel_context *ce; @@ -2920,7 +3712,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme, if (err) return err; - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); @@ -2982,7 +3774,6 @@ static int live_gpr_clear(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; struct i915_vma *scratch; enum intel_engine_id id; int err = 0; @@ -2992,18 +3783,12 @@ static int live_gpr_clear(void *arg) * to avoid leaking any information from previous contexts. */ - fixme = kernel_context(gt->i915); - if (!fixme) - return -ENOMEM; - scratch = create_scratch(gt); - if (IS_ERR(scratch)) { - err = PTR_ERR(scratch); - goto out_close; - } + if (IS_ERR(scratch)) + return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - err = __live_gpr_clear(fixme, engine, scratch); + err = __live_gpr_clear(engine, scratch); if (err) break; } @@ -3012,8 +3797,6 @@ static int live_gpr_clear(void *arg) err = -EIO; i915_vma_unpin_and_release(&scratch, 0); -out_close: - kernel_context_close(fixme); return err; } @@ -3021,6 +3804,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_lrc_layout), + SUBTEST(live_lrc_fixed), SUBTEST(live_lrc_state), SUBTEST(live_gpr_clear), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c new file mode 100644 index 000000000000..de1f83100fb6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -0,0 +1,419 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "gt/intel_engine_pm.h" +#include "i915_selftest.h" + +#include "gem/selftests/mock_context.h" +#include "selftests/igt_reset.h" +#include "selftests/igt_spinner.h" + +struct live_mocs { + struct drm_i915_mocs_table table; + struct i915_vma *scratch; + void *vaddr; +}; + +static int request_add_sync(struct i915_request *rq, int err) +{ + i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + + return err; +} + +static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) +{ + int err = 0; + + i915_request_get(rq); + i915_request_add(rq); + if (spin && !igt_wait_for_spinner(spin, rq)) + err = -ETIME; + i915_request_put(rq); + + return err; +} + +static struct i915_vma *create_scratch(struct intel_gt *gt) +{ + struct 
drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED); + + vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + return vma; +} + +static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt) +{ + int err; + + if (!get_mocs_settings(gt->i915, &arg->table)) + return -EINVAL; + + arg->scratch = create_scratch(gt); + if (IS_ERR(arg->scratch)) + return PTR_ERR(arg->scratch); + + arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB); + if (IS_ERR(arg->vaddr)) { + err = PTR_ERR(arg->vaddr); + goto err_scratch; + } + + return 0; + +err_scratch: + i915_vma_unpin_and_release(&arg->scratch, 0); + return err; +} + +static void live_mocs_fini(struct live_mocs *arg) +{ + i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP); +} + +static int read_regs(struct i915_request *rq, + u32 addr, unsigned int count, + uint32_t *offset) +{ + unsigned int i; + u32 *cs; + + GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32))); + + cs = intel_ring_begin(rq, 4 * count); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + for (i = 0; i < count; i++) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = addr; + *cs++ = *offset; + *cs++ = 0; + + addr += sizeof(u32); + *offset += sizeof(u32); + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static int read_mocs_table(struct i915_request *rq, + const struct drm_i915_mocs_table *table, + uint32_t *offset) +{ + u32 addr; + + if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915)) + addr = global_mocs_offset(); + else + addr = mocs_offset(rq->engine); + + return read_regs(rq, addr, table->n_entries, offset); +} + +static int read_l3cc_table(struct i915_request *rq, + const struct drm_i915_mocs_table *table, + uint32_t *offset) +{ + u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0)); + + return read_regs(rq, addr, (table->n_entries + 1) / 2, offset); +} + +static int check_mocs_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table, + uint32_t **vaddr) +{ + unsigned int i; + u32 expect; + + for_each_mocs(expect, table, i) { + if (**vaddr != expect) { + pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n", + engine->name, i, **vaddr, expect); + return -EINVAL; + } + ++*vaddr; + } + + return 0; +} + +static bool mcr_range(struct drm_i915_private *i915, u32 offset) +{ + /* + * Registers in this range are affected by the MCR selector + * which only controls CPU initiated MMIO. Routing does not + * work for CS access so we cannot verify them on this path. + */ + return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff; +} + +static int check_l3cc_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table, + uint32_t **vaddr) +{ + /* Can we read the MCR range 0xb00 directly? See intel_workarounds!
*/ + u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0)); + unsigned int i; + u32 expect; + + for_each_l3cc(expect, table, i) { + if (!mcr_range(engine->i915, reg) && **vaddr != expect) { + pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n", + engine->name, i, **vaddr, expect); + return -EINVAL; + } + ++*vaddr; + reg += 4; + } + + return 0; +} + +static int check_mocs_engine(struct live_mocs *arg, + struct intel_context *ce) +{ + struct i915_vma *vma = arg->scratch; + struct i915_request *rq; + u32 offset; + u32 *vaddr; + int err; + + memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, true); + if (!err) + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); + + /* Read the mocs tables back using SRM */ + offset = i915_ggtt_offset(vma); + if (!err) + err = read_mocs_table(rq, &arg->table, &offset); + if (!err && ce->engine->class == RENDER_CLASS) + err = read_l3cc_table(rq, &arg->table, &offset); + offset -= i915_ggtt_offset(vma); + GEM_BUG_ON(offset > PAGE_SIZE); + + err = request_add_sync(rq, err); + if (err) + return err; + + /* Compare the results against the expected tables */ + vaddr = arg->vaddr; + if (!err) + err = check_mocs_table(ce->engine, &arg->table, &vaddr); + if (!err && ce->engine->class == RENDER_CLASS) + err = check_l3cc_table(ce->engine, &arg->table, &vaddr); + if (err) + return err; + + GEM_BUG_ON(arg->vaddr + offset != vaddr); + return 0; +} + +static int live_mocs_kernel(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct live_mocs mocs; + int err; + + /* Basic check the system is configured with the expected mocs table */ + + err = live_mocs_init(&mocs, gt); + if (err) + return err; + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = check_mocs_engine(&mocs, engine->kernel_context); + intel_engine_pm_put(engine); + if (err) + break; + } + + live_mocs_fini(&mocs); + return err; +} + +static int live_mocs_clean(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct live_mocs mocs; + int err; + + /* Every new context should see the same mocs table */ + + err = live_mocs_init(&mocs, gt); + if (err) + return err; + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + err = check_mocs_engine(&mocs, ce); + intel_context_put(ce); + if (err) + break; + } + + live_mocs_fini(&mocs); + return err; +} + +static int active_engine_reset(struct intel_context *ce, + const char *reason) +{ + struct igt_spinner spin; + struct i915_request *rq; + int err; + + err = igt_spinner_init(&spin, ce->engine->gt); + if (err) + return err; + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + if (IS_ERR(rq)) { + igt_spinner_fini(&spin); + return PTR_ERR(rq); + } + + err = request_add_spin(rq, &spin); + if (err == 0) + err = intel_engine_reset(ce->engine, reason); + + igt_spinner_end(&spin); + igt_spinner_fini(&spin); + + return err; +} + +static int __live_mocs_reset(struct live_mocs *mocs, + struct intel_context *ce) +{ + int err; + + err = intel_engine_reset(ce->engine, "mocs"); + if (err) + return err; + + err = check_mocs_engine(mocs, ce); + if (err) + return err; + + err = active_engine_reset(ce, "mocs"); + if (err) + return err; + + err 
= check_mocs_engine(mocs, ce); + if (err) + return err; + + intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs"); + + err = check_mocs_engine(mocs, ce); + if (err) + return err; + + return 0; +} + +static int live_mocs_reset(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct live_mocs mocs; + int err = 0; + + /* Check the mocs setup is retained over per-engine and global resets */ + + if (!intel_has_reset_engine(gt)) + return 0; + + err = live_mocs_init(&mocs, gt); + if (err) + return err; + + igt_global_reset_lock(gt); + for_each_engine(engine, gt, id) { + struct intel_context *ce; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + intel_engine_pm_get(engine); + err = __live_mocs_reset(&mocs, ce); + intel_engine_pm_put(engine); + + intel_context_put(ce); + if (err) + break; + } + igt_global_reset_unlock(gt); + + live_mocs_fini(&mocs); + return err; +} + +int intel_mocs_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_mocs_kernel), + SUBTEST(live_mocs_clean), + SUBTEST(live_mocs_reset), + }; + struct drm_i915_mocs_table table; + + if (!get_mocs_settings(i915, &table)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c new file mode 100644 index 000000000000..8cc55a0e9e06 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -0,0 +1,203 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_context.h" +#include "intel_engine_pm.h" +#include "intel_gt_requests.h" +#include "intel_ring.h" +#include "selftest_rc6.h" + +#include "selftests/i915_random.h" + +int live_rc6_manual(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_rc6 *rc6 = &gt->rc6; + intel_wakeref_t wakeref; + u64 res[2]; + int err = 0; + + /* + * Our claim is that we can "encourage" the GPU to enter rc6 at will. + * Let's try it! + */ + + if (!rc6->enabled) + return 0; + + /* bsw/byt use a PCU and decouple RC6 from our manual control */ + if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) + return 0; + + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + /* Force RC6 off for starters */ + __intel_rc6_disable(rc6); + msleep(1); /* wakeup is not immediate, takes about 100us on icl */ + + res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + msleep(250); + res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + if ((res[1] - res[0]) >> 10) { + pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n", + (res[1] - res[0]) >> 10); + err = -EINVAL; + goto out_unlock; + } + + /* Manually enter RC6 */ + intel_rc6_park(rc6); + + res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + msleep(100); + res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + + if (res[1] == res[0]) { + pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n", + intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE), + intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL)); + err = -EINVAL; + } + + /* Restore what should have been the original state!
*/ + intel_rc6_unpark(rc6); + +out_unlock: + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + return err; +} + +static const u32 *__live_rc6_ctx(struct intel_context *ce) +{ + struct i915_request *rq; + const u32 *result; + u32 cmd; + u32 *cs; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return ERR_CAST(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return cs; + } + + cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; + if (INTEL_GEN(rq->i915) >= 8) + cmd++; + + *cs++ = cmd; + *cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO); + *cs++ = ce->timeline->hwsp_offset + 8; + *cs++ = 0; + intel_ring_advance(rq, cs); + + result = rq->hwsp_seqno + 2; + i915_request_add(rq); + + return result; +} + +static struct intel_engine_cs ** +randomised_engines(struct intel_gt *gt, + struct rnd_state *prng, + unsigned int *count) +{ + struct intel_engine_cs *engine, **engines; + enum intel_engine_id id; + int n; + + n = 0; + for_each_engine(engine, gt, id) + n++; + if (!n) + return NULL; + + engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL); + if (!engines) + return NULL; + + n = 0; + for_each_engine(engine, gt, id) + engines[n++] = engine; + + i915_prandom_shuffle(engines, sizeof(*engines), n, prng); + + *count = n; + return engines; +} + +int live_rc6_ctx_wa(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs **engines; + unsigned int n, count; + I915_RND_STATE(prng); + int err = 0; + + /* A read of CTX_INFO upsets rc6. Poke the bear! */ + if (INTEL_GEN(gt->i915) < 8) + return 0; + + engines = randomised_engines(gt, &prng, &count); + if (!engines) + return 0; + + for (n = 0; n < count; n++) { + struct intel_engine_cs *engine = engines[n]; + int pass; + + for (pass = 0; pass < 2; pass++) { + struct intel_context *ce; + unsigned int resets = + i915_reset_engine_count(&gt->i915->gpu_error, + engine); + const u32 *res; + + /* Use a sacrificial context */ + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + + intel_engine_pm_get(engine); + res = __live_rc6_ctx(ce); + intel_engine_pm_put(engine); + intel_context_put(ce); + if (IS_ERR(res)) { + err = PTR_ERR(res); + goto out; + } + + if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) { + intel_gt_set_wedged(gt); + err = -ETIME; + goto out; + } + + intel_gt_pm_wait_for_idle(gt); + pr_debug("%s: CTX_INFO=%0x\n", + engine->name, READ_ONCE(*res)); + + if (resets != + i915_reset_engine_count(&gt->i915->gpu_error, + engine)) { + pr_err("%s: GPU reset required\n", + engine->name); + add_taint_for_CI(TAINT_WARN); + err = -EIO; + goto out; + } + } + } + +out: + kfree(engines); + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h new file mode 100644 index 000000000000..762fd442d7b2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef SELFTEST_RC6_H +#define SELFTEST_RC6_H + +int live_rc6_ctx_wa(void *arg); +int live_rc6_manual(void *arg); + +#endif /* SELFTEST_RC6_H */ diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 6efb9221b7fa..6ad6aca315f6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -126,7 +126,7 @@ static int igt_atomic_engine_reset(void *arg) goto out_unlock; for_each_engine(engine, gt, id) { - tasklet_disable_nosync(&engine->execlists.tasklet); +
tasklet_disable(&engine->execlists.tasklet); intel_engine_pm_get(engine); for (p = igt_atomic_phases; p->name; p++) { diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index dac86f699a4c..e2d78cc22fb4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -9,6 +9,7 @@ #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_requests.h" +#include "intel_ring.h" #include "../selftests/i915_random.h" #include "../i915_selftest.h" @@ -457,7 +458,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) goto out; } - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) goto out_unpin; @@ -674,9 +675,7 @@ static int live_hwsp_wrap(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - intel_engine_pm_get(engine); - rq = i915_request_create(engine->kernel_context); - intel_engine_pm_put(engine); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index ef02920cec29..ac1921854cbf 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -264,22 +264,15 @@ static int switch_to_scratch_context(struct intel_engine_cs *engine, struct igt_spinner *spin) { - struct i915_gem_context *ctx; struct intel_context *ce; struct i915_request *rq; int err = 0; - ctx = kernel_context(engine->i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); - - ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); - GEM_BUG_ON(IS_ERR(ce)); + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); rq = igt_spinner_create_request(spin, ce, MI_NOOP); - intel_context_put(ce); if (IS_ERR(rq)) { @@ -293,7 +286,6 @@ err: if (err && spin) igt_spinner_end(spin); - kernel_context_close(ctx); return err; } @@ -367,20 +359,17 @@ out_ctx: return err; } -static struct i915_vma *create_batch(struct i915_gem_context *ctx) +static struct i915_vma *create_batch(struct i915_address_space *vm) { struct drm_i915_gem_object *obj; - struct i915_address_space *vm; struct i915_vma *vma; int err; - obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE); + obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE); if (IS_ERR(obj)) return ERR_CAST(obj); - vm = i915_gem_context_get_vm_rcu(ctx); vma = i915_vma_instance(obj, vm, NULL); - i915_vm_put(vm); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -452,8 +441,7 @@ static int whitelist_writable_count(struct intel_engine_cs *engine) return count; } -static int check_dirty_whitelist(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static int check_dirty_whitelist(struct intel_context *ce) { const u32 values[] = { 0x00000000, @@ -481,19 +469,17 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, 0xffff00ff, 0xffffffff, }; - struct i915_address_space *vm; + struct intel_engine_cs *engine = ce->engine; struct i915_vma *scratch; struct i915_vma *batch; int err = 0, i, v; u32 *cs, *results; - vm = i915_gem_context_get_vm_rcu(ctx); - scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1); - i915_vm_put(vm); + scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1); if (IS_ERR(scratch)) return PTR_ERR(scratch); - batch = create_batch(ctx); + batch = 
create_batch(ce->vm); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto out_scratch; @@ -513,9 +499,12 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, ro_reg = ro_register(reg); + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + srm = MI_STORE_REGISTER_MEM; lrm = MI_LOAD_REGISTER_MEM; - if (INTEL_GEN(ctx->i915) >= 8) + if (INTEL_GEN(engine->i915) >= 8) lrm++, srm++; pr_debug("%s: Writing garbage to %x\n", @@ -574,7 +563,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, i915_gem_object_unpin_map(batch->obj); intel_gt_chipset_flush(engine->gt); - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_batch; @@ -693,7 +682,7 @@ out_unpin: break; } - if (igt_flush_test(ctx->i915)) + if (igt_flush_test(engine->i915)) err = -EIO; out_batch: i915_vma_unpin_and_release(&batch, 0); @@ -706,38 +695,31 @@ static int live_dirty_whitelist(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *ctx; enum intel_engine_id id; - struct drm_file *file; - int err = 0; /* Can the user write to the whitelisted registers? */ if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ return 0; - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(gt->i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_file; - } - for_each_engine(engine, gt, id) { + struct intel_context *ce; + int err; + if (engine->whitelist.count == 0) continue; - err = check_dirty_whitelist(ctx, engine); + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = check_dirty_whitelist(ce); + intel_context_put(ce); if (err) - goto out_file; + return err; } -out_file: - mock_file_free(gt->i915, file); - return err; + return 0; } static int live_reset_whitelist(void *arg) @@ -810,8 +792,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, u64 offset = results->node.start + sizeof(u32) * i; u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - /* Clear access permission field */ - reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK; + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; *cs++ = srm; *cs++ = reg; @@ -827,12 +809,15 @@ err_req: static int scrub_whitelisted_registers(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + struct i915_address_space *vm; struct i915_request *rq; struct i915_vma *batch; int i, err = 0; u32 *cs; - batch = create_batch(ctx); + vm = i915_gem_context_get_vm_rcu(ctx); + batch = create_batch(vm); + i915_vm_put(vm); if (IS_ERR(batch)) return PTR_ERR(batch); @@ -849,6 +834,9 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, if (ro_register(reg)) continue; + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + *cs++ = reg; *cs++ = 0xffffffff; } diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c index 2a77c051f36a..aeb1d1f616e8 100644 --- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c @@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context) mutex_init(&timeline->mutex); - INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex); + INIT_ACTIVE_FENCE(&timeline->last_request); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); diff --git 
a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h index 689efc66c908..d2bcc3df6183 100644 --- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h @@ -7,6 +7,8 @@ #ifndef __MOCK_TIMELINE__ #define __MOCK_TIMELINE__ +#include <linux/types.h> + struct intel_timeline; void mock_timeline_init(struct intel_timeline *timeline, u64 context); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 37f7bcbf7dac..5d00a3b2d914 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -4,6 +4,8 @@ */ #include "gt/intel_gt.h" +#include "gt/intel_gt_irq.h" +#include "gt/intel_gt_pm_irq.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" @@ -30,18 +32,17 @@ * just the HuC, but more are expected to land in the future). */ -static void gen8_guc_raise_irq(struct intel_guc *guc) +void intel_guc_notify(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); -} - -static void gen11_guc_raise_irq(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0); + /* + * On Gen11+, the value written to the register is passed as a payload + * to the FW. However, the FW currently treats all values the same way + * (H2G interrupt), so we can just write the value that the HW expects + * on older gens. + */ + intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER); } static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) @@ -77,6 +78,93 @@ void intel_guc_init_send_regs(struct intel_guc *guc) guc->send_regs.fw_domains = fw_domains; } +static void gen9_reset_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(&gt->i915->runtime_pm); + + spin_lock_irq(&gt->irq_lock); + gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); + spin_unlock_irq(&gt->irq_lock); +} + +static void gen9_enable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(&gt->i915->runtime_pm); + + spin_lock_irq(&gt->irq_lock); + if (!guc->interrupts.enabled) { + WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & + gt->pm_guc_events); + guc->interrupts.enabled = true; + gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); + } + spin_unlock_irq(&gt->irq_lock); +} + +static void gen9_disable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(&gt->i915->runtime_pm); + + spin_lock_irq(&gt->irq_lock); + guc->interrupts.enabled = false; + + gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); + + spin_unlock_irq(&gt->irq_lock); + intel_synchronize_irq(gt->i915); + + gen9_reset_guc_interrupts(guc); +} + +static void gen11_reset_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(&gt->irq_lock); + gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); + spin_unlock_irq(&gt->irq_lock); +}
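Before the gen11 enable path below, note how these helpers pair two registers: enabling writes the event bits to GEN11_GUC_SG_INTR_ENABLE and their complement to GEN11_GUC_SG_INTR_MASK, while disabling masks everything and then clears the enables. A toy model of that gating, with an assumed bit layout, shows why both registers must agree before an event is delivered:

#include <stdint.h>
#include <stdio.h>

#define GUC_INTR_GUC2HOST	(1u << 4)	/* assumed placeholder bit */

struct irq_regs {
	uint32_t enable;	/* models GEN11_GUC_SG_INTR_ENABLE */
	uint32_t mask;		/* models GEN11_GUC_SG_INTR_MASK */
};

static void guc_irq_enable(struct irq_regs *r, uint32_t events)
{
	r->enable = events;	/* allow these sources ... */
	r->mask = ~events;	/* ... and unmask only them */
}

static void guc_irq_disable(struct irq_regs *r)
{
	r->mask = ~0u;		/* mask everything first ... */
	r->enable = 0;		/* ... then drop the enables */
}

/* An event reaches the host only if it is enabled and not masked. */
static uint32_t delivered(const struct irq_regs *r, uint32_t raised)
{
	return raised & r->enable & ~r->mask;
}

int main(void)
{
	struct irq_regs r;

	guc_irq_enable(&r, GUC_INTR_GUC2HOST);
	printf("enabled:  %d\n", !!delivered(&r, GUC_INTR_GUC2HOST));
	guc_irq_disable(&r);
	printf("disabled: %d\n", !!delivered(&r, GUC_INTR_GUC2HOST));
	return 0;
}

+ +static void gen11_enable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(&gt->irq_lock); + if (!guc->interrupts.enabled) { + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); + + WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_ENABLE, events); + intel_uncore_write(gt->uncore, +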
GEN11_GUC_SG_INTR_MASK, ~events); + guc->interrupts.enabled = true; + } + spin_unlock_irq(&gt->irq_lock); +} + +static void gen11_disable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(&gt->irq_lock); + guc->interrupts.enabled = false; + + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + + spin_unlock_irq(&gt->irq_lock); + intel_synchronize_irq(gt->i915); + + gen11_reset_guc_interrupts(guc); +} + void intel_guc_init_early(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_gt(guc)->i915; @@ -88,47 +176,19 @@ void intel_guc_init_early(struct intel_guc *guc) mutex_init(&guc->send_mutex); spin_lock_init(&guc->irq_lock); - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; if (INTEL_GEN(i915) >= 11) { - guc->notify = gen11_guc_raise_irq; + guc->notify_reg = GEN11_GUC_HOST_INTERRUPT; guc->interrupts.reset = gen11_reset_guc_interrupts; guc->interrupts.enable = gen11_enable_guc_interrupts; guc->interrupts.disable = gen11_disable_guc_interrupts; } else { - guc->notify = gen8_guc_raise_irq; + guc->notify_reg = GUC_SEND_INTERRUPT; guc->interrupts.reset = gen9_reset_guc_interrupts; guc->interrupts.enable = gen9_enable_guc_interrupts; guc->interrupts.disable = gen9_disable_guc_interrupts; } } -static int guc_shared_data_create(struct intel_guc *guc) -{ - struct i915_vma *vma; - void *vaddr; - - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); - } - - guc->shared_data = vma; - guc->shared_data_vaddr = vaddr; - - return 0; -} - -static void guc_shared_data_destroy(struct intel_guc *guc) -{ - i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); -} - static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); @@ -275,14 +335,9 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_fetch; - ret = guc_shared_data_create(guc); - if (ret) - goto err_fw; - GEM_BUG_ON(!guc->shared_data); - ret = intel_guc_log_create(&guc->log); if (ret) - goto err_shared; + goto err_fw; ret = intel_guc_ads_create(guc); if (ret) @@ -317,8 +372,6 @@ err_ads: intel_guc_ads_destroy(guc); err_log: intel_guc_log_destroy(&guc->log); -err_shared: - guc_shared_data_destroy(guc); err_fw: intel_uc_fw_fini(&guc->fw); err_fetch: @@ -343,21 +396,10 @@ void intel_guc_fini(struct intel_guc *guc) intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); - guc_shared_data_destroy(guc); intel_uc_fw_fini(&guc->fw); intel_uc_fw_cleanup_fetch(&guc->fw); -} - -int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) -{ - WARN(1, "Unexpected send: action=%#x\n", *action); - return -ENODEV; -} -void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) -{ - WARN(1, "Unexpected event: no suitable handler\n"); + intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED); } /* @@ -499,6 +541,13 @@ int intel_guc_suspend(struct intel_guc *guc) }; /* + * If GuC communication is enabled but submission is not supported, + * we do not need to suspend the GuC. 
+ */ + if (!intel_guc_submission_is_enabled(guc)) + return 0; + + /* * The ENTER_S_STATE action queues the save/restore operation in GuC FW * and then returns, so waiting on the H2G is not enough to guarantee * GuC is done. When all the processing is done, GuC writes @@ -539,19 +588,9 @@ int intel_guc_suspend(struct intel_guc *guc) int intel_guc_reset_engine(struct intel_guc *guc, struct intel_engine_cs *engine) { - u32 data[7]; - - GEM_BUG_ON(!guc->execbuf_client); - - data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET; - data[1] = engine->guc_id; - data[2] = 0; - data[3] = 0; - data[4] = 0; - data[5] = guc->execbuf_client->stage_id; - data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); + /* XXX: to be implemented with submission interface rework */ - return intel_guc_send(guc, data, ARRAY_SIZE(data)); + return -ENODEV; } /** @@ -565,6 +604,14 @@ int intel_guc_resume(struct intel_guc *guc) GUC_POWER_D0, }; + /* + * If GuC communication is enabled but submission is not supported, + * we do not need to resume the GuC but we do need to enable the + * GuC communication on resume (above). + */ + if (!intel_guc_submission_is_enabled(guc)) + return 0; + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } @@ -644,3 +691,37 @@ err: i915_gem_object_put(obj); return vma; } + +/** + * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage + * @guc: the guc + * @size: size of area to allocate (both virtual space and memory) + * @out_vma: return variable for the allocated vma pointer + * @out_vaddr: return variable for the obj mapping + * + * This wrapper calls intel_guc_allocate_vma() and then maps the allocated + * object with I915_MAP_WB. + * + * Return: 0 if successful, a negative errno code otherwise. + */ +int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size, + struct i915_vma **out_vma, void **out_vaddr) +{ + struct i915_vma *vma; + void *vaddr; + + vma = intel_guc_allocate_vma(guc, size); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_unpin_and_release(&vma, 0); + return PTR_ERR(vaddr); + } + + *out_vma = vma; + *out_vaddr = vaddr; + + return 0; +}
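Several setup paths in this series (ADS, CT, and the stage-descriptor, workqueue and process-descriptor objects later on) repeat the allocate-then-map-or-unwind dance, which is why the wrapper above returns either both results or neither. A standalone sketch of that out-parameter contract, using toy stand-ins rather than the real i915 types:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for an i915_vma-backed buffer -- illustration only. */
struct toy_vma {
	void *backing;
	size_t size;
};

/* Same shape as intel_guc_allocate_and_map_vma(): on success both
 * out-parameters are set; on failure neither is, and the partial
 * allocation is unwound. */
static int allocate_and_map(size_t size, struct toy_vma **out_vma,
			    void **out_vaddr)
{
	struct toy_vma *vma = malloc(sizeof(*vma));

	if (!vma)
		return -ENOMEM;

	vma->size = size;
	vma->backing = calloc(1, size);	/* the "pin + map" step */
	if (!vma->backing) {
		free(vma);		/* unwind, mirroring unpin_and_release */
		return -ENOMEM;
	}

	*out_vma = vma;
	*out_vaddr = vma->backing;
	return 0;
}

int main(void)
{
	struct toy_vma *vma;
	void *vaddr;
	int err = allocate_and_map(4096, &vma, &vaddr);

	if (err)
		return 1;
	printf("mapped %zu bytes at %p\n", vma->size, vaddr);
	free(vma->backing);
	free(vma);
	return 0;
}

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2b2f046d3cc3..910d49590068 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -20,8 +20,8 @@ struct __guc_ads_blob; /* * Top level structure of GuC. It handles firmware loading and manages client - * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy - * ExecList submission. + * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList + * submission. */ struct intel_guc { struct intel_uc_fw fw; @@ -46,15 +46,13 @@ struct intel_guc { struct i915_vma *stage_desc_pool; void *stage_desc_pool_vaddr; - struct ida stage_ids; - struct i915_vma *shared_data; - void *shared_data_vaddr; - struct intel_guc_client *execbuf_client; + struct i915_vma *workqueue; + void *workqueue_vaddr; + spinlock_t wq_lock; - DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); - /* Cyclic counter mod pagesize */ - u32 db_cacheline; + struct i915_vma *proc_desc; + void *proc_desc_vaddr; /* Control params for fw initialization */ u32 params[GUC_CTL_MAX_DWORDS]; @@ -66,44 +64,33 @@ struct intel_guc { enum forcewake_domains fw_domains; } send_regs; + /* register used to send interrupts to the GuC FW */ + i915_reg_t notify_reg; + + /* Store msg (e.g. 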
log flush) that we see while CTBs are disabled */ u32 mmio_msg; /* To serialize the intel_guc_send actions */ struct mutex send_mutex; - - /* GuC's FW specific send function */ - int (*send)(struct intel_guc *guc, const u32 *data, u32 len, - u32 *response_buf, u32 response_buf_size); - - /* GuC's FW specific event handler function */ - void (*handler)(struct intel_guc *guc); - - /* GuC's FW specific notify function */ - void (*notify)(struct intel_guc *guc); }; static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) { - return guc->send(guc, action, len, NULL, 0); + return intel_guc_ct_send(&guc->ct, action, len, NULL, 0); } static inline int intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - return guc->send(guc, action, len, response_buf, response_buf_size); -} - -static inline void intel_guc_notify(struct intel_guc *guc) -{ - guc->notify(guc); + return intel_guc_ct_send(&guc->ct, action, len, + response_buf, response_buf_size); } static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) { - guc->handler(guc); + intel_guc_ct_event_handler(&guc->ct); } /* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ @@ -138,12 +125,9 @@ void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_write_params(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); -int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size); +void intel_guc_notify(struct intel_guc *guc); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); -void intel_guc_to_host_event_handler(struct intel_guc *guc); -void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, const u32 *payload, u32 len); int intel_guc_sample_forcewake(struct intel_guc *guc); @@ -151,6 +135,8 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); +int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size, + struct i915_vma **out_vma, void **out_vaddr); static inline bool intel_guc_is_supported(struct intel_guc *guc) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index ca6674b8e00c..101728006ae9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -93,7 +93,8 @@ static void __guc_ads_init(struct intel_guc *guc) */ blob->ads.golden_context_lrca[engine_class] = 0; blob->ads.eng_state_size[engine_class] = - intel_engine_context_size(dev_priv, engine_class) - + intel_engine_context_size(guc_to_gt(guc), + engine_class) - skipped_size; } @@ -135,32 +136,19 @@ static void __guc_ads_init(struct intel_guc *guc) int intel_guc_ads_create(struct intel_guc *guc) { const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); - struct i915_vma *vma; - void *blob; int ret; GEM_BUG_ON(guc->ads_vma); - vma = intel_guc_allocate_vma(guc, size); - if (IS_ERR(vma)) - return PTR_ERR(vma); + ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma, + (void **)&guc->ads_blob); - blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) { - ret = PTR_ERR(blob); - goto err_vma; - } - - 
guc->ads_vma = vma; - guc->ads_blob = blob; + if (ret) + return ret; __guc_ads_init(guc); return 0; - -err_vma: - i915_vma_unpin_and_release(&guc->ads_vma, 0); - return ret; } void intel_guc_ads_destroy(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index b49115517510..c6f971a049f9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -37,13 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w); */ void intel_guc_ct_init_early(struct intel_guc_ct *ct) { - /* we're using static channel owners */ - ct->host_channel.owner = CTB_OWNER_HOST; - - spin_lock_init(&ct->lock); - INIT_LIST_HEAD(&ct->pending_requests); - INIT_LIST_HEAD(&ct->incoming_requests); - INIT_WORK(&ct->worker, ct_incoming_request_worker_func); + spin_lock_init(&ct->requests.lock); + INIT_LIST_HEAD(&ct->requests.pending); + INIT_LIST_HEAD(&ct->requests.incoming); + INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func); } static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) @@ -64,14 +61,13 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type) } static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, - u32 cmds_addr, u32 size, u32 owner) + u32 cmds_addr, u32 size) { - CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", - desc, cmds_addr, size, owner); + CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size); memset(desc, 0, sizeof(*desc)); desc->addr = cmds_addr; desc->size = size; - desc->owner = owner; + desc->owner = CTB_OWNER_HOST; } static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) @@ -104,12 +100,11 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc, } static int guc_action_deregister_ct_buffer(struct intel_guc *guc, - u32 owner, u32 type) { u32 action[] = { INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER, - owner, + CTB_OWNER_HOST, type }; int err; @@ -117,20 +112,27 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, /* Can't use generic send(), CT deregistration must go over MMIO */ err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); if (err) - DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n", - guc_ct_buffer_type_to_str(type), owner, err); + DRM_ERROR("CT: deregister %s buffer failed; err=%d\n", + guc_ct_buffer_type_to_str(type), err); return err; } -static int ctch_init(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +/** + * intel_guc_ct_init - Init buffer-based communication + * @ct: pointer to CT struct + * + * Allocate memory required for buffer-based communication. + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_init(struct intel_guc_ct *ct) { - struct i915_vma *vma; + struct intel_guc *guc = ct_to_guc(ct); void *blob; int err; int i; - GEM_BUG_ON(ctch->vma); + GEM_BUG_ON(ct->vma); /* We allocate 1 page to hold both descriptors and both buffers. * ___________..................... @@ -154,71 +156,65 @@ static int ctch_init(struct intel_guc *guc, * other code will need updating as well. 
*/ - /* allocate vma */ - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_out; + err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob); + if (err) { + DRM_ERROR("CT: channel allocation failed; err=%d\n", err); + return err; } - ctch->vma = vma; - /* map first page */ - blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) { - err = PTR_ERR(blob); - goto err_vma; - } CT_DEBUG_DRIVER("CT: vma base=%#x\n", - intel_guc_ggtt_offset(guc, ctch->vma)); + intel_guc_ggtt_offset(guc, ct->vma)); /* store pointers to desc and cmds */ - for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i; - ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; + for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { + GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); + ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i; + ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; } return 0; - -err_vma: - i915_vma_unpin_and_release(&ctch->vma, 0); -err_out: - CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", - ctch->owner, err); - return err; } -static void ctch_fini(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +/** + * intel_guc_ct_fini - Fini buffer-based communication + * @ct: pointer to CT struct + * + * Deallocate memory required for buffer-based communication. + */ +void intel_guc_ct_fini(struct intel_guc_ct *ct) { - GEM_BUG_ON(ctch->enabled); + GEM_BUG_ON(ct->enabled); - i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); + i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP); } -static int ctch_enable(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +/** + * intel_guc_ct_enable - Enable buffer based command transport. + * @ct: pointer to CT struct + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_enable(struct intel_guc_ct *ct) { + struct intel_guc *guc = ct_to_guc(ct); u32 base; int err; int i; - GEM_BUG_ON(!ctch->vma); - - GEM_BUG_ON(ctch->enabled); + GEM_BUG_ON(ct->enabled); /* vma should be already allocated and map'ed */ - base = intel_guc_ggtt_offset(guc, ctch->vma); + GEM_BUG_ON(!ct->vma); + base = intel_guc_ggtt_offset(guc, ct->vma); /* (re)initialize descriptors * cmds buffers are in the second half of the blob page */ - for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - guc_ct_buffer_desc_init(ctch->ctbs[i].desc, + for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); + guc_ct_buffer_desc_init(ct->ctbs[i].desc, base + PAGE_SIZE/4 * i + PAGE_SIZE/2, - PAGE_SIZE/4, - ctch->owner); + PAGE_SIZE/4); } /* register buffers, starting with RECV buffer @@ -236,38 +232,42 @@ static int ctch_enable(struct intel_guc *guc, if (unlikely(err)) goto err_deregister; - ctch->enabled = true; + ct->enabled = true; return 0; err_deregister: guc_action_deregister_ct_buffer(guc, - ctch->owner, INTEL_GUC_CT_BUFFER_TYPE_RECV); err_out: - DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); + DRM_ERROR("CT: can't open channel; err=%d\n", err); return err; }
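intel_guc_ct_init() and intel_guc_ct_enable() above derive the CT blob layout from the same PAGE_SIZE/4 arithmetic: the two descriptors occupy the first half of the page and the two command buffers the second. A standalone check of the resulting offsets, assuming the 4 KiB page the layout comment describes:

#include <stdio.h>

#define PAGE_SIZE 4096u

enum { CTB_SEND, CTB_RECV };

int main(void)
{
	unsigned int i;

	/* descriptors sit in the first half, command buffers in the second */
	for (i = CTB_SEND; i <= CTB_RECV; i++) {
		unsigned int desc = PAGE_SIZE / 4 * i;
		unsigned int cmds = PAGE_SIZE / 4 * i + PAGE_SIZE / 2;

		printf("ctbs[%u]: desc @ 0x%03x, cmds @ 0x%03x..0x%03x\n",
		       i, desc, cmds, cmds + PAGE_SIZE / 4 - 1);
	}
	return 0;
}

-static void ctch_disable(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +/** + * intel_guc_ct_disable - Disable buffer based command transport. 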
+ * @ct: pointer to CT struct + */ +void intel_guc_ct_disable(struct intel_guc_ct *ct) { - GEM_BUG_ON(!ctch->enabled); + struct intel_guc *guc = ct_to_guc(ct); - ctch->enabled = false; + GEM_BUG_ON(!ct->enabled); - guc_action_deregister_ct_buffer(guc, - ctch->owner, - INTEL_GUC_CT_BUFFER_TYPE_SEND); - guc_action_deregister_ct_buffer(guc, - ctch->owner, - INTEL_GUC_CT_BUFFER_TYPE_RECV); + ct->enabled = false; + + if (intel_guc_is_running(guc)) { + guc_action_deregister_ct_buffer(guc, + INTEL_GUC_CT_BUFFER_TYPE_SEND); + guc_action_deregister_ct_buffer(guc, + INTEL_GUC_CT_BUFFER_TYPE_RECV); + } } -static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) +static u32 ct_get_next_fence(struct intel_guc_ct *ct) { /* For now it's trivial */ - return ++ctch->next_fence; + return ++ct->requests.next_fence; } /** @@ -440,35 +440,34 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status) return err; } -static int ctch_send(struct intel_guc_ct *ct, - struct intel_guc_ct_channel *ctch, - const u32 *action, - u32 len, - u32 *response_buf, - u32 response_buf_size, - u32 *status) +static int ct_send(struct intel_guc_ct *ct, + const u32 *action, + u32 len, + u32 *response_buf, + u32 response_buf_size, + u32 *status) { - struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; + struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND]; struct guc_ct_buffer_desc *desc = ctb->desc; struct ct_request request; unsigned long flags; u32 fence; int err; - GEM_BUG_ON(!ctch->enabled); + GEM_BUG_ON(!ct->enabled); GEM_BUG_ON(!len); GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); GEM_BUG_ON(!response_buf && response_buf_size); - fence = ctch_get_next_fence(ctch); + fence = ct_get_next_fence(ct); request.fence = fence; request.status = 0; request.response_len = response_buf_size; request.response_buf = response_buf; - spin_lock_irqsave(&ct->lock, flags); - list_add_tail(&request.link, &ct->pending_requests); - spin_unlock_irqrestore(&ct->lock, flags); + spin_lock_irqsave(&ct->requests.lock, flags); + list_add_tail(&request.link, &ct->requests.pending); + spin_unlock_irqrestore(&ct->requests.lock, flags); err = ctb_write(ctb, action, len, fence, !!response_buf); if (unlikely(err)) @@ -501,9 +500,9 @@ static int ctch_send(struct intel_guc_ct *ct, } unlink: - spin_lock_irqsave(&ct->lock, flags); + spin_lock_irqsave(&ct->requests.lock, flags); list_del(&request.link); - spin_unlock_irqrestore(&ct->lock, flags); + spin_unlock_irqrestore(&ct->requests.lock, flags); return err; } @@ -511,18 +510,21 @@ unlink: /* * Command Transport (CT) buffer based GuC send function. 
*/ -int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, +int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - struct intel_guc_ct *ct = &guc->ct; - struct intel_guc_ct_channel *ctch = &ct->host_channel; + struct intel_guc *guc = ct_to_guc(ct); u32 status = ~0; /* undefined */ int ret; + if (unlikely(!ct->enabled)) { + WARN(1, "Unexpected send: action=%#x\n", *action); + return -ENODEV; + } + mutex_lock(&guc->send_mutex); - ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, - &status); + ret = ct_send(ct, action, len, response_buf, response_buf_size, &status); if (unlikely(ret < 0)) { DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", action[0], ret, status); @@ -653,8 +655,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); - spin_lock(&ct->lock); - list_for_each_entry(req, &ct->pending_requests, link) { + spin_lock(&ct->requests.lock); + list_for_each_entry(req, &ct->requests.pending, link) { if (unlikely(fence != req->fence)) { CT_DEBUG_DRIVER("CT: request %u awaits response\n", req->fence); @@ -672,7 +674,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) found = true; break; } - spin_unlock(&ct->lock); + spin_unlock(&ct->requests.lock); if (!found) DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg); @@ -710,13 +712,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) u32 *payload; bool done; - spin_lock_irqsave(&ct->lock, flags); - request = list_first_entry_or_null(&ct->incoming_requests, + spin_lock_irqsave(&ct->requests.lock, flags); + request = list_first_entry_or_null(&ct->requests.incoming, struct ct_incoming_request, link); if (request) list_del(&request->link); - done = !!list_empty(&ct->incoming_requests); - spin_unlock_irqrestore(&ct->lock, flags); + done = !!list_empty(&ct->requests.incoming); + spin_unlock_irqrestore(&ct->requests.lock, flags); if (!request) return true; @@ -734,12 +736,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) static void ct_incoming_request_worker_func(struct work_struct *w) { - struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); + struct intel_guc_ct *ct = + container_of(w, struct intel_guc_ct, requests.worker); bool done; done = ct_process_incoming_requests(ct); if (!done) - queue_work(system_unbound_wq, &ct->worker); + queue_work(system_unbound_wq, &ct->requests.worker); } /** @@ -777,23 +780,28 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) } memcpy(request->msg, msg, 4 * msglen); - spin_lock_irqsave(&ct->lock, flags); - list_add_tail(&request->link, &ct->incoming_requests); - spin_unlock_irqrestore(&ct->lock, flags); + spin_lock_irqsave(&ct->requests.lock, flags); + list_add_tail(&request->link, &ct->requests.incoming); + spin_unlock_irqrestore(&ct->requests.lock, flags); - queue_work(system_unbound_wq, &ct->worker); + queue_work(system_unbound_wq, &ct->requests.worker); return 0; }
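The request tracking in these hunks is a fence-matched mailbox: ct_send() tags each outgoing message with a fresh fence and parks it on requests.pending, ct_handle_response() walks that list for the matching waiter, and anything unmatched is reported as unsolicited. A reduced model of the fence bookkeeping, with simplified stand-in types and no locking:

#include <stdint.h>
#include <stdio.h>

struct ct_request {
	uint32_t fence;
	uint32_t status;
	int done;
	struct ct_request *next;
};

struct ct {
	uint32_t next_fence;
	struct ct_request *pending;
};

static uint32_t ct_send(struct ct *ct, struct ct_request *req)
{
	req->fence = ++ct->next_fence;	/* trivial fence allocation */
	req->done = 0;
	req->next = ct->pending;	/* track until the response lands */
	ct->pending = req;
	return req->fence;
}

static int ct_handle_response(struct ct *ct, uint32_t fence, uint32_t status)
{
	for (struct ct_request *req = ct->pending; req; req = req->next) {
		if (req->fence != fence)
			continue;
		req->status = status;
		req->done = 1;
		return 0;
	}
	return -1;	/* unsolicited response */
}

int main(void)
{
	struct ct ct = {0};
	struct ct_request req;
	uint32_t fence = ct_send(&ct, &req);

	ct_handle_response(&ct, fence, 0 /* 0 standing in for success */);
	printf("fence %u done=%d status=%u\n",
	       (unsigned int)fence, req.done, (unsigned int)req.status);
	return 0;
}

-static void ct_process_host_channel(struct intel_guc_ct *ct) +/* + * When we're communicating with the GuC over CT, GuC uses events + * to notify us about new messages being posted on the RECV buffer. 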
+ */ +void intel_guc_ct_event_handler(struct intel_guc_ct *ct) { - struct intel_guc_ct_channel *ctch = &ct->host_channel; - struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV]; + struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV]; u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ int err = 0; - if (!ctch->enabled) + if (unlikely(!ct->enabled)) { + WARN(1, "Unexpected GuC event received while CT disabled!\n"); return; + } do { err = ctb_read(ctb, msg); @@ -812,86 +820,3 @@ static void ct_process_host_channel(struct intel_guc_ct *ct) } } -/* - * When we're communicating with the GuC over CT, GuC uses events - * to notify us about new messages being posted on the RECV buffer. - */ -void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) -{ - struct intel_guc_ct *ct = &guc->ct; - - ct_process_host_channel(ct); -} - -/** - * intel_guc_ct_init - Init CT communication - * @ct: pointer to CT struct - * - * Allocate memory required for communication via - * the CT channel. - * - * Return: 0 on success, a negative errno code on failure. - */ -int intel_guc_ct_init(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - int err; - - err = ctch_init(guc, ctch); - if (unlikely(err)) { - DRM_ERROR("CT: can't open channel %d; err=%d\n", - ctch->owner, err); - return err; - } - - GEM_BUG_ON(!ctch->vma); - return 0; -} - -/** - * intel_guc_ct_fini - Fini CT communication - * @ct: pointer to CT struct - * - * Deallocate memory required for communication via - * the CT channel. - */ -void intel_guc_ct_fini(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - - ctch_fini(guc, ctch); -} - -/** - * intel_guc_ct_enable - Enable buffer based command transport. - * @ct: pointer to CT struct - * - * Return: 0 on success, a negative errno code on failure. - */ -int intel_guc_ct_enable(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - - if (ctch->enabled) - return 0; - - return ctch_enable(guc, ctch); -} - -/** - * intel_guc_ct_disable - Disable buffer based command transport. - * @ct: pointer to CT struct - */ -void intel_guc_ct_disable(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - - if (!ctch->enabled) - return; - - ctch_disable(guc, ctch); -} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h index 7c24d83f5c24..3e7fe237cfa5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -35,44 +35,28 @@ struct intel_guc_ct_buffer { u32 *cmds; }; -/** Represents pair of command transport buffers. - * - * Buffers go in pairs to allow bi-directional communication. - * To simplify the code we place both of them in the same vma. - * Buffers from the same pair must share unique owner id. - * - * @vma: pointer to the vma with pair of CT buffers - * @ctbs: buffers for sending(0) and receiving(1) commands - * @owner: unique identifier - * @next_fence: fence to be used with next send command - */ -struct intel_guc_ct_channel { - struct i915_vma *vma; - struct intel_guc_ct_buffer ctbs[2]; - u32 owner; - u32 next_fence; - bool enabled; -}; -/** Holds all command transport channels. 
+/** Top-level structure for Command Transport related data * - * @host_channel: main channel used by the host + * Includes a pair of CT buffers for bi-directional communication and tracking + * for the H2G and G2H requests sent and received through the buffers. */ struct intel_guc_ct { - struct intel_guc_ct_channel host_channel; - /* other channels are tbd */ + struct i915_vma *vma; + bool enabled; - /** @lock: protects pending requests list */ - spinlock_t lock; + /* buffers for sending(0) and receiving(1) commands */ + struct intel_guc_ct_buffer ctbs[2]; - /** @pending_requests: list of requests waiting for response */ - struct list_head pending_requests; + struct { + u32 next_fence; /* fence to be used with next request to send */ - /** @incoming_requests: list of incoming requests */ - struct list_head incoming_requests; + spinlock_t lock; /* protects pending requests list */ + struct list_head pending; /* requests waiting for response */ - /** @worker: worker for handling incoming requests */ - struct work_struct worker; + struct list_head incoming; /* incoming requests */ + struct work_struct worker; /* handler for incoming requests */ + } requests; }; void intel_guc_ct_init_early(struct intel_guc_ct *ct); @@ -81,13 +65,13 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct); int intel_guc_ct_enable(struct intel_guc_ct *ct); void intel_guc_ct_disable(struct intel_guc_ct *ct); -static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) +static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct) { - ct->host_channel.enabled = false; + return ct->enabled; } -int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, +int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); -void intel_guc_to_host_event_handler_ct(struct intel_guc *guc); +void intel_guc_ct_event_handler(struct intel_guc_ct *ct); #endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 5528224448f6..3a1c47d600ea 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -149,7 +149,7 @@ int intel_guc_fw_upload(struct intel_guc *guc) * Current uCode expects the code to be loaded at 8k; locations below * this are used for the stack. 
*/ - ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE); + ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 1d3cdd67ca2f..a6b733c146c9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -31,7 +31,6 @@ #define GUC_DOORBELL_INVALID 256 -#define GUC_DB_SIZE (PAGE_SIZE) #define GUC_WQ_SIZE (PAGE_SIZE * 2) /* Work queue item header definitions */ @@ -548,6 +547,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, @@ -556,7 +556,6 @@ enum intel_guc_action { INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, - INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, INTEL_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 2cf2d3314f62..caed0d57e704 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -226,7 +226,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log) mutex_lock(&log->relay.lock); - if (WARN_ON(!intel_guc_log_relay_enabled(log))) + if (WARN_ON(!intel_guc_log_relay_created(log))) goto out_unlock; /* Get the pointer to shared GuC log buffer */ @@ -361,6 +361,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log) { mutex_init(&log->relay.lock); INIT_WORK(&log->relay.flush_work, capture_logs_work); + log->relay.started = false; } static int guc_log_relay_create(struct intel_guc_log *log) @@ -546,7 +547,7 @@ out_unlock: return ret; } -bool intel_guc_log_relay_enabled(const struct intel_guc_log *log) +bool intel_guc_log_relay_created(const struct intel_guc_log *log) { return log->relay.buf_addr; } @@ -560,7 +561,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) mutex_lock(&log->relay.lock); - if (intel_guc_log_relay_enabled(log)) { + if (intel_guc_log_relay_created(log)) { ret = -EEXIST; goto out_unlock; } @@ -585,6 +586,21 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) mutex_unlock(&log->relay.lock); + return 0; + +out_relay: + guc_log_relay_destroy(log); +out_unlock: + mutex_unlock(&log->relay.lock); + + return ret; +} + +int intel_guc_log_relay_start(struct intel_guc_log *log) +{ + if (log->relay.started) + return -EEXIST; + guc_log_enable_flush_events(log); /* @@ -594,14 +610,9 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) */ queue_work(system_highpri_wq, &log->relay.flush_work); - return 0; - -out_relay: - guc_log_relay_destroy(log); -out_unlock: - mutex_unlock(&log->relay.lock); + log->relay.started = true; - return ret; + return 0; } void intel_guc_log_relay_flush(struct intel_guc_log *log) @@ -609,6 +620,9 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) struct intel_guc *guc = log_to_guc(log); intel_wakeref_t wakeref; + if (!log->relay.started) + return; + /* * Before initiating the forceful flush, wait for any pending/ongoing * flush to complete otherwise forceful flush may not actually happen. 
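This hunk and the next split the relay lifecycle into "created" (relay.buf_addr) and "started" (the new relay.started flag): open creates the channel, start begins flushing, flush is a no-op until started, and close stops before destroying. A compact sketch of that state machine and its -EEXIST guards (simplified, no locking):

#include <errno.h>
#include <stdio.h>

/* Simplified mirror of the two relay states. */
struct relay {
	int created;	/* stands in for relay.buf_addr being set */
	int started;	/* stands in for the new relay.started flag */
};

static int relay_open(struct relay *r)
{
	if (r->created)
		return -EEXIST;
	r->created = 1;
	return 0;
}

static int relay_start(struct relay *r)
{
	if (r->started)
		return -EEXIST;
	r->started = 1;	/* from here on, flush work may be queued */
	return 0;
}

static void relay_flush(struct relay *r)
{
	if (!r->started)	/* the driver's flush bails the same way */
		return;
	printf("flushing\n");
}

static void relay_close(struct relay *r)
{
	r->started = 0;	/* stop first ... */
	r->created = 0;	/* ... then destroy */
}

int main(void)
{
	struct relay r = {0};

	relay_open(&r);
	relay_flush(&r);	/* silently ignored: not started yet */
	relay_start(&r);
	relay_flush(&r);	/* now runs */
	relay_close(&r);
	return 0;
}
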
@@ -622,18 +636,33 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) guc_log_capture_logs(log); } -void intel_guc_log_relay_close(struct intel_guc_log *log) +/* + * Stops the relay log. Called from intel_guc_log_relay_close(), so no + * possibility of race with start/flush since relay_write cannot race + * relay_close. + */ +static void guc_log_relay_stop(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + if (!log->relay.started) + return; + guc_log_disable_flush_events(log); intel_synchronize_irq(i915); flush_work(&log->relay.flush_work); + log->relay.started = false; +} + +void intel_guc_log_relay_close(struct intel_guc_log *log) +{ + guc_log_relay_stop(log); + mutex_lock(&log->relay.lock); - GEM_BUG_ON(!intel_guc_log_relay_enabled(log)); + GEM_BUG_ON(!intel_guc_log_relay_created(log)); guc_log_unmap(log); guc_log_relay_destroy(log); mutex_unlock(&log->relay.lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index 6f764879acb1..c252c022c5fc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -47,6 +47,7 @@ struct intel_guc_log { struct i915_vma *vma; struct { void *buf_addr; + bool started; struct work_struct flush_work; struct rchan *channel; struct mutex lock; @@ -65,8 +66,9 @@ int intel_guc_log_create(struct intel_guc_log *log); void intel_guc_log_destroy(struct intel_guc_log *log); int intel_guc_log_set_level(struct intel_guc_log *log, u32 level); -bool intel_guc_log_relay_enabled(const struct intel_guc_log *log); +bool intel_guc_log_relay_created(const struct intel_guc_log *log); int intel_guc_log_relay_open(struct intel_guc_log *log); +int intel_guc_log_relay_start(struct intel_guc_log *log); void intel_guc_log_relay_flush(struct intel_guc_log *log); void intel_guc_log_relay_close(struct intel_guc_log *log); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 009e54a3764f..9e42324fdecd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -6,26 +6,18 @@ #include <linux/circ_buf.h> #include "gem/i915_gem_context.h" - #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" + #include "intel_guc_submission.h" #include "i915_drv.h" #include "i915_trace.h" -enum { - GUC_PREEMPT_NONE = 0, - GUC_PREEMPT_INPROGRESS, - GUC_PREEMPT_FINISHED, -}; -#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 -#define GUC_PREEMPT_BREADCRUMB_BYTES \ - (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) - /** * DOC: GuC-based command submission * @@ -35,25 +27,14 @@ enum { * code) matches the old submission model and will be updated as part of the * upgrade to the new flow. * - * GuC client: - * A intel_guc_client refers to a submission path through GuC. Currently, there - * is only one client, which is charged with all submissions to the GuC. This - * struct is the owner of a doorbell, a process descriptor and a workqueue (all - * of them inside a single gem object that contains all required pages for these - * elements). - * * GuC stage descriptor: * During initialization, the driver allocates a static pool of 1024 such - * descriptors, and shares them with the GuC. 
- * Currently, there exists a 1:1 mapping between a intel_guc_client and a - * guc_stage_desc (via the client's stage_id), so effectively only one - * gets used. This stage descriptor lets the GuC know about the doorbell, - * workqueue and process descriptor. Theoretically, it also lets the GuC - * know about our HW contexts (context ID, etc...), but we actually - * employ a kind of submission where the GuC uses the LRCA sent via the work - * item instead (the single guc_stage_desc associated to execbuf client - * contains information about the default kernel context only, but this is - * essentially unused). This is called a "proxy" submission. + * descriptors, and shares them with the GuC. Currently, we only use one + * descriptor. This stage descriptor lets the GuC know about the workqueue and + * process descriptor. Theoretically, it also lets the GuC know about our HW + * contexts (context ID, etc...), but we actually employ a kind of submission + * where the GuC uses the LRCA sent via the work item instead. This is called + * a "proxy" submission. * * The Scratch registers: * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes * a value to the action register (SOFT_SCRATCH_0) along with any data. It then * triggers an interrupt on the GuC via another register write (0xC4C8). * Firmware writes a success/fail code back to the action register after * processing the request. The kernel driver polls waiting for this update and * then proceeds. - * See intel_guc_send() - * - * Doorbells: - * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW) - * mapped into process space. * * Work Items: * There are several types of work items that the host may place into a @@ -83,213 +59,45 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); } -static inline bool is_high_priority(struct intel_guc_client *client) -{ - return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH || - client->priority == GUC_CLIENT_PRIORITY_HIGH); -} - -static int reserve_doorbell(struct intel_guc_client *client) -{ - unsigned long offset; - unsigned long end; - u16 id; - - GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID); - - /* - * The bitmap tracks which doorbell registers are currently in use. - * It is split into two halves; the first half is used for normal - * priority contexts, the second half for high-priority ones. 
- */ - offset = 0; - end = GUC_NUM_DOORBELLS / 2; - if (is_high_priority(client)) { - offset = end; - end += offset; - } - - id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset); - if (id == end) - return -ENOSPC; - - __set_bit(id, client->guc->doorbell_bitmap); - client->doorbell_id = id; - DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n", - client->stage_id, yesno(is_high_priority(client)), - id); - return 0; -} - -static bool has_doorbell(struct intel_guc_client *client) -{ - if (client->doorbell_id == GUC_DOORBELL_INVALID) - return false; - - return test_bit(client->doorbell_id, client->guc->doorbell_bitmap); -} - -static void unreserve_doorbell(struct intel_guc_client *client) -{ - GEM_BUG_ON(!has_doorbell(client)); - - __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap); - client->doorbell_id = GUC_DOORBELL_INVALID; -} - -/* - * Tell the GuC to allocate or deallocate a specific doorbell - */ - -static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id) +static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id) { - u32 action[] = { - INTEL_GUC_ACTION_ALLOCATE_DOORBELL, - stage_id - }; + struct guc_stage_desc *base = guc->stage_desc_pool_vaddr; - return intel_guc_send(guc, action, ARRAY_SIZE(action)); + return &base[id]; } -static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id) +static int guc_workqueue_create(struct intel_guc *guc) { - u32 action[] = { - INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, - stage_id - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); + return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue, + &guc->workqueue_vaddr); } -static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client) +static void guc_workqueue_destroy(struct intel_guc *guc) { - struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr; - - return &base[client->stage_id]; + i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP); } /* - * Initialise, update, or clear doorbell data shared with the GuC - * - * These functions modify shared data and so need access to the mapped - * client object which contains the page being used for the doorbell + * Initialise the process descriptor shared with the GuC firmware. 
*/ - -static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id) +static int guc_proc_desc_create(struct intel_guc *guc) { - struct guc_stage_desc *desc; + const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc)); - /* Update the GuC's idea of the doorbell ID */ - desc = __get_stage_desc(client); - desc->db_id = new_id; + return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc, + &guc->proc_desc_vaddr); } -static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) +static void guc_proc_desc_destroy(struct intel_guc *guc) { - return client->vaddr + client->doorbell_offset; -} - -static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) -{ - struct intel_uncore *uncore = guc_to_gt(guc)->uncore; - - GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; + i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP); } -static void __init_doorbell(struct intel_guc_client *client) -{ - struct guc_doorbell_info *doorbell; - - doorbell = __get_doorbell(client); - doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = 0; -} - -static void __fini_doorbell(struct intel_guc_client *client) -{ - struct guc_doorbell_info *doorbell; - u16 db_id = client->doorbell_id; - - doorbell = __get_doorbell(client); - doorbell->db_status = GUC_DOORBELL_DISABLED; - - /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit - * to go to zero after updating db_status before we call the GuC to - * release the doorbell - */ - if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10)) - WARN_ONCE(true, "Doorbell never became invalid after disable\n"); -} - -static int create_doorbell(struct intel_guc_client *client) -{ - int ret; - - if (WARN_ON(!has_doorbell(client))) - return -ENODEV; /* internal setup error, should never happen */ - - __update_doorbell_desc(client, client->doorbell_id); - __init_doorbell(client); - - ret = __guc_allocate_doorbell(client->guc, client->stage_id); - if (ret) { - __fini_doorbell(client); - __update_doorbell_desc(client, GUC_DOORBELL_INVALID); - DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", - client->stage_id, ret); - return ret; - } - - return 0; -} - -static int destroy_doorbell(struct intel_guc_client *client) -{ - int ret; - - GEM_BUG_ON(!has_doorbell(client)); - - __fini_doorbell(client); - ret = __guc_deallocate_doorbell(client->guc, client->stage_id); - if (ret) - DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", - client->stage_id, ret); - - __update_doorbell_desc(client, GUC_DOORBELL_INVALID); - - return ret; -} - -static unsigned long __select_cacheline(struct intel_guc *guc) -{ - unsigned long offset; - - /* Doorbell uses a single cache line within a page */ - offset = offset_in_page(guc->db_cacheline); - - /* Moving to next cache line to reduce contention */ - guc->db_cacheline += cache_line_size(); - - DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n", - offset, guc->db_cacheline, cache_line_size()); - return offset; -} - -static inline struct guc_process_desc * -__get_process_desc(struct intel_guc_client *client) -{ - return client->vaddr + client->proc_desc_offset; -} - -/* - * Initialise the process descriptor shared with the GuC firmware. 
- */ -static void guc_proc_desc_init(struct intel_guc_client *client) +static void guc_proc_desc_init(struct intel_guc *guc) { struct guc_process_desc *desc; - desc = memset(__get_process_desc(client), 0, sizeof(*desc)); + desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc)); /* * XXX: pDoorbell and WQVBaseAddress are pointers in process address @@ -300,47 +108,27 @@ static void guc_proc_desc_init(struct intel_guc_client *client) desc->wq_base_addr = 0; desc->db_base_addr = 0; - desc->stage_id = client->stage_id; desc->wq_size_bytes = GUC_WQ_SIZE; desc->wq_status = WQ_STATUS_ACTIVE; - desc->priority = client->priority; + desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL; } -static void guc_proc_desc_fini(struct intel_guc_client *client) +static void guc_proc_desc_fini(struct intel_guc *guc) { - struct guc_process_desc *desc; - - desc = __get_process_desc(client); - memset(desc, 0, sizeof(*desc)); + memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc)); } static int guc_stage_desc_pool_create(struct intel_guc *guc) { - struct i915_vma *vma; - void *vaddr; - - vma = intel_guc_allocate_vma(guc, - PAGE_ALIGN(sizeof(struct guc_stage_desc) * - GUC_MAX_STAGE_DESCRIPTORS)); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); - } + u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) * + GUC_MAX_STAGE_DESCRIPTORS); - guc->stage_desc_pool = vma; - guc->stage_desc_pool_vaddr = vaddr; - ida_init(&guc->stage_ids); - - return 0; + return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool, + &guc->stage_desc_pool_vaddr); } static void guc_stage_desc_pool_destroy(struct intel_guc *guc) { - ida_destroy(&guc->stage_ids); i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP); } @@ -348,63 +136,49 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc) * Initialise/clear the stage descriptor shared with the GuC firmware. * * This descriptor tells the GuC where (in GGTT space) to find the important - * data structures relating to this client (doorbell, process descriptor, - * write queue, etc). + * data structures related to work submission (process descriptor, write queue, + * etc). 
*/ -static void guc_stage_desc_init(struct intel_guc_client *client) +static void guc_stage_desc_init(struct intel_guc *guc) { - struct intel_guc *guc = client->guc; struct guc_stage_desc *desc; - u32 gfx_addr; - desc = __get_stage_desc(client); + /* we only use 1 stage desc, so hardcode it to 0 */ + desc = __get_stage_desc(guc, 0); memset(desc, 0, sizeof(*desc)); desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL; - if (is_high_priority(client)) - desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT; - desc->stage_id = client->stage_id; - desc->priority = client->priority; - desc->db_id = client->doorbell_id; - /* - * The doorbell, process descriptor, and workqueue are all parts - * of the client object, which the GuC will reference via the GGTT - */ - gfx_addr = intel_guc_ggtt_offset(guc, client->vma); - desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + - client->doorbell_offset; - desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client)); - desc->db_trigger_uk = gfx_addr + client->doorbell_offset; - desc->process_desc = gfx_addr + client->proc_desc_offset; - desc->wq_addr = gfx_addr + GUC_DB_SIZE; - desc->wq_size = GUC_WQ_SIZE; + desc->stage_id = 0; + desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL; - desc->desc_private = ptr_to_u64(client); + desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc); + desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue); + desc->wq_size = GUC_WQ_SIZE; } -static void guc_stage_desc_fini(struct intel_guc_client *client) +static void guc_stage_desc_fini(struct intel_guc *guc) { struct guc_stage_desc *desc; - desc = __get_stage_desc(client); + desc = __get_stage_desc(guc, 0); memset(desc, 0, sizeof(*desc)); } /* Construct a Work Item and append it to the GuC's Work Queue */ -static void guc_wq_item_append(struct intel_guc_client *client, +static void guc_wq_item_append(struct intel_guc *guc, u32 target_engine, u32 context_desc, u32 ring_tail, u32 fence_id) { /* wqi_len is in DWords, and does not include the one-word header */ const size_t wqi_size = sizeof(struct guc_wq_item); const u32 wqi_len = wqi_size / sizeof(u32) - 1; - struct guc_process_desc *desc = __get_process_desc(client); + struct guc_process_desc *desc = guc->proc_desc_vaddr; struct guc_wq_item *wqi; u32 wq_off; - lockdep_assert_held(&client->wq_lock); + lockdep_assert_held(&guc->wq_lock); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. 
So we * should not have the case where structure wqi is across page, neither @@ -424,58 +198,30 @@ static void guc_wq_item_append(struct intel_guc_client *client, GUC_WQ_SIZE) < wqi_size); GEM_BUG_ON(wq_off & (wqi_size - 1)); - /* WQ starts from the page after doorbell / process_desc */ - wqi = client->vaddr + wq_off + GUC_DB_SIZE; - - if (I915_SELFTEST_ONLY(client->use_nop_wqi)) { - wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT); - } else { - /* Now fill in the 4-word work queue item */ - wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (target_engine << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; - wqi->context_desc = context_desc; - wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; - GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); - wqi->fence_id = fence_id; - } + wqi = guc->workqueue_vaddr + wq_off; + + /* Now fill in the 4-word work queue item */ + wqi->header = WQ_TYPE_INORDER | + (wqi_len << WQ_LEN_SHIFT) | + (target_engine << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; + wqi->context_desc = context_desc; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); + wqi->fence_id = fence_id; /* Make the update visible to GuC */ WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); }
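guc_wq_item_append() above is the producer half of a simple power-of-two ring: items are fixed-size, the host only ever advances desc->tail, and the (GUC_WQ_SIZE - 1) mask performs the wrap. A standalone sketch of the tail arithmetic, with the work-item struct abbreviated:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096u
#define GUC_WQ_SIZE	(PAGE_SIZE * 2)	/* two pages, as in the fwif header */

/* 4-dword work item, matching the size the driver appends. */
struct wq_item {
	uint32_t header;
	uint32_t context_desc;
	uint32_t submit_element_info;
	uint32_t fence_id;
};

static uint8_t workqueue[GUC_WQ_SIZE];

/* Producer side only: copy the item at the tail, then advance with a
 * power-of-two mask so the offset wraps back to zero. */
static uint32_t wq_append(uint32_t tail, const struct wq_item *item)
{
	memcpy(workqueue + tail, item, sizeof(*item));
	return (tail + sizeof(*item)) & (GUC_WQ_SIZE - 1);
}

int main(void)
{
	struct wq_item item = { .header = 0x1, .fence_id = 42 };
	uint32_t tail = GUC_WQ_SIZE - sizeof(item);	/* last slot */

	tail = wq_append(tail, &item);
	printf("tail wrapped to %u\n", (unsigned int)tail);	/* prints 0 */
	return 0;
}

-static void guc_ring_doorbell(struct intel_guc_client *client) -{ - struct guc_doorbell_info *db; - u32 cookie; - - lockdep_assert_held(&client->wq_lock); - - /* pointer of current doorbell cacheline */ - db = __get_doorbell(client); - - /* - * We're not expecting the doorbell cookie to change behind our back, - * we also need to treat 0 as a reserved value. - */ - cookie = READ_ONCE(db->cookie); - WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie); - - /* XXX: doorbell was lost and need to acquire it again */ - GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); -} - static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { - struct intel_guc_client *client = guc->execbuf_client; struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); + u32 ctx_desc = lower_32_bits(rq->context->lrc_desc); u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); - guc_wq_item_append(client, engine->guc_id, ctx_desc, + guc_wq_item_append(guc, engine->guc_id, ctx_desc, ring_tail, rq->fence.seqno); - guc_ring_doorbell(client); } /* @@ -487,10 +233,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) */ static void flush_ggtt_writes(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; - if (i915_vma_is_map_and_fenceable(vma)) - intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); + intel_uncore_posting_read_fw(vma->vm->gt->uncore, + GUC_STATUS); } static void guc_submit(struct intel_engine_cs *engine, @@ -498,9 +243,8 @@ static void guc_submit(struct intel_engine_cs *engine, struct i915_request **end) { struct intel_guc *guc = &engine->gt->uc.guc; - struct intel_guc_client *client = guc->execbuf_client; - spin_lock(&client->wq_lock); + spin_lock(&guc->wq_lock); do { struct i915_request *rq = *out++; @@ -509,7 +253,7 @@ static void guc_submit(struct intel_engine_cs *engine, guc_add_request(guc, rq); } while (out != end); - spin_unlock(&client->wq_lock); + spin_unlock(&guc->wq_lock); } static inline int rq_prio(const struct i915_request *rq) @@ -528,7 +272,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx) * required if we generalise the inflight tracking. 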
*/ - intel_gt_pm_get(rq->engine->gt); + __intel_gt_pm_get(rq->engine->gt); return i915_request_get(rq); } @@ -536,7 +280,7 @@ static void schedule_out(struct i915_request *rq) { trace_i915_request_out(rq); - intel_gt_pm_put(rq->engine->gt); + intel_gt_pm_put_async(rq->engine->gt); i915_request_put(rq); } @@ -571,7 +315,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { - if (last && rq->hw_context != last->hw_context) { + if (last && rq->context != last->context) { if (port == last_port) goto done; @@ -630,7 +374,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); /* * Prevent request submission to the hardware until we have @@ -657,7 +401,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists) memset(execlists->inflight, 0, sizeof(execlists->inflight)); } -static void guc_reset(struct intel_engine_cs *engine, bool stalled) +static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_request *rq; @@ -676,20 +420,20 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled) stalled = false; __i915_request_reset(rq, stalled); - intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); + intel_lr_context_reset(engine, rq->context, rq->head, stalled); out_unlock: spin_unlock_irqrestore(&engine->active.lock, flags); } -static void guc_cancel_requests(struct intel_engine_cs *engine) +static void guc_reset_cancel(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_request *rq, *rn; struct rb_node *rb; unsigned long flags; - GEM_TRACE("%s\n", engine->name); + ENGINE_TRACE(engine, "\n"); /* * Before we call engine->cancel_requests(), we should have exclusive @@ -750,8 +494,8 @@ static void guc_reset_finish(struct intel_engine_cs *engine) /* And kick in case we missed a new request submission. */ tasklet_hi_schedule(&execlists->tasklet); - GEM_TRACE("%s: depth->%d\n", engine->name, - atomic_read(&execlists->tasklet.count)); + ENGINE_TRACE(engine, "depth->%d\n", + atomic_read(&execlists->tasklet.count)); } /* @@ -760,213 +504,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine) * path of guc_submit() above. */ -/* Check that a doorbell register is in the expected state */ -static bool doorbell_ok(struct intel_guc *guc, u16 db_id) -{ - bool valid; - - GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - - valid = __doorbell_valid(guc, db_id); - - if (test_bit(db_id, guc->doorbell_bitmap) == valid) - return true; - - DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n", - db_id, yesno(valid)); - - return false; -} - -static bool guc_verify_doorbells(struct intel_guc *guc) -{ - bool doorbells_ok = true; - u16 db_id; - - for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) - if (!doorbell_ok(guc, db_id)) - doorbells_ok = false; - - return doorbells_ok; -} - -/** - * guc_client_alloc() - Allocate an intel_guc_client - * @guc: the intel_guc structure - * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW - * The kernel client to replace ExecList submission is created with - * NORMAL priority. Priority of a client for scheduler can be HIGH, - * while a preemption context can use CRITICAL. - * - * Return: An intel_guc_client object if success, else NULL. 
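The deleted guc_client_alloc() that follows is a textbook example of the kernel's goto-based unwind ladder: each failure point jumps to a label that releases, in reverse order, everything acquired before it. A compilable toy version of the same shape (resources stubbed with malloc, purely illustrative):

#include <stdlib.h>

struct client { void *vma; void *vaddr; };

static struct client *client_alloc_sketch(void)
{
	struct client *c = malloc(sizeof(*c));

	if (!c)
		return NULL;

	c->vma = malloc(64);		/* stands in for the GGTT vma */
	if (!c->vma)
		goto err_client;

	c->vaddr = malloc(64);		/* stands in for the pinned mapping */
	if (!c->vaddr)
		goto err_vma;

	return c;

err_vma:
	free(c->vma);
err_client:
	free(c);
	return NULL;
}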
- */ -static struct intel_guc_client * -guc_client_alloc(struct intel_guc *guc, u32 priority) -{ - struct intel_guc_client *client; - struct i915_vma *vma; - void *vaddr; - int ret; - - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) - return ERR_PTR(-ENOMEM); - - client->guc = guc; - client->priority = priority; - client->doorbell_id = GUC_DOORBELL_INVALID; - spin_lock_init(&client->wq_lock); - - ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, - GFP_KERNEL); - if (ret < 0) - goto err_client; - - client->stage_id = ret; - - /* The first page is doorbell/proc_desc. Two followed pages are wq. */ - vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_id; - } - - /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */ - client->vma = vma; - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_vma; - } - client->vaddr = vaddr; - - ret = reserve_doorbell(client); - if (ret) - goto err_vaddr; - - client->doorbell_offset = __select_cacheline(guc); - - /* - * Since the doorbell only requires a single cacheline, we can save - * space by putting the application process descriptor in the same - * page. Use the half of the page that doesn't include the doorbell. - */ - if (client->doorbell_offset >= (GUC_DB_SIZE / 2)) - client->proc_desc_offset = 0; - else - client->proc_desc_offset = (GUC_DB_SIZE / 2); - - DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n", - priority, client, client->stage_id); - DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", - client->doorbell_id, client->doorbell_offset); - - return client; - -err_vaddr: - i915_gem_object_unpin_map(client->vma->obj); -err_vma: - i915_vma_unpin_and_release(&client->vma, 0); -err_id: - ida_simple_remove(&guc->stage_ids, client->stage_id); -err_client: - kfree(client); - return ERR_PTR(ret); -} - -static void guc_client_free(struct intel_guc_client *client) -{ - unreserve_doorbell(client); - i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); - ida_simple_remove(&client->guc->stage_ids, client->stage_id); - kfree(client); -} - -static inline bool ctx_save_restore_disabled(struct intel_context *ce) -{ - u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1]; - -#define SR_DISABLED \ - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \ - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) - - return (sr & SR_DISABLED) == SR_DISABLED; - -#undef SR_DISABLED -} - -static int guc_clients_create(struct intel_guc *guc) -{ - struct intel_guc_client *client; - - GEM_BUG_ON(guc->execbuf_client); - - client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL); - if (IS_ERR(client)) { - DRM_ERROR("Failed to create GuC client for submission!\n"); - return PTR_ERR(client); - } - guc->execbuf_client = client; - - return 0; -} - -static void guc_clients_destroy(struct intel_guc *guc) -{ - struct intel_guc_client *client; - - client = fetch_and_zero(&guc->execbuf_client); - if (client) - guc_client_free(client); -} - -static int __guc_client_enable(struct intel_guc_client *client) -{ - int ret; - - guc_proc_desc_init(client); - guc_stage_desc_init(client); - - ret = create_doorbell(client); - if (ret) - goto fail; - - return 0; - -fail: - guc_stage_desc_fini(client); - guc_proc_desc_fini(client); - return ret; -} - -static void __guc_client_disable(struct intel_guc_client *client) -{ - /* - * By the time we're here, GuC may have already been reset. 
if that is - * the case, instead of trying (in vain) to communicate with it, let's - * just cleanup the doorbell HW and our internal state. - */ - if (intel_guc_is_running(client->guc)) - destroy_doorbell(client); - else - __fini_doorbell(client); - - guc_stage_desc_fini(client); - guc_proc_desc_fini(client); -} - -static int guc_clients_enable(struct intel_guc *guc) -{ - return __guc_client_enable(guc->execbuf_client); -} - -static void guc_clients_disable(struct intel_guc *guc) -{ - if (guc->execbuf_client) - __guc_client_disable(guc->execbuf_client); -} - /* * Set up the memory resources to be shared with the GuC (via the GGTT) * at firmware loading time. @@ -987,13 +524,20 @@ int intel_guc_submission_init(struct intel_guc *guc) */ GEM_BUG_ON(!guc->stage_desc_pool); - WARN_ON(!guc_verify_doorbells(guc)); - ret = guc_clients_create(guc); + ret = guc_workqueue_create(guc); if (ret) goto err_pool; + ret = guc_proc_desc_create(guc); + if (ret) + goto err_workqueue; + + spin_lock_init(&guc->wq_lock); + return 0; +err_workqueue: + guc_workqueue_destroy(guc); err_pool: guc_stage_desc_pool_destroy(guc); return ret; @@ -1001,83 +545,37 @@ err_pool: void intel_guc_submission_fini(struct intel_guc *guc) { - guc_clients_destroy(guc); - WARN_ON(!guc_verify_doorbells(guc)); - - if (guc->stage_desc_pool) + if (guc->stage_desc_pool) { + guc_proc_desc_destroy(guc); + guc_workqueue_destroy(guc); guc_stage_desc_pool_destroy(guc); + } } static void guc_interrupts_capture(struct intel_gt *gt) { - struct intel_rps *rps = >->i915->gt_pm.rps; struct intel_uncore *uncore = gt->uncore; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int irqs; + u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT; + u32 dmask = irqs << 16 | irqs; - /* tell all command streamers to forward interrupts (but not vblank) - * to GuC - */ - irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, gt, id) - ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); - - /* route USER_INTERRUPT to Host, all others are sent to GuC. */ - irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - /* These three registers have the same bit definitions */ - intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs); - intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs); - intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs); + GEM_BUG_ON(INTEL_GEN(gt->i915) < 11); - /* - * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all - * (unmasked) PM interrupts to the GuC. All other bits of this - * register *disable* generation of a specific interrupt. - * - * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when - * writing to the PM interrupt mask register, i.e. interrupts - * that must not be disabled. - * - * If the GuC is handling these interrupts, then we must not let - * the PM code disable ANY interrupt that the GuC is expecting. - * So for each ENABLED (0) bit in this register, we must SET the - * bit in pm_intrmsk_mbz so that it's left enabled for the GuC. - * GuC needs ARAT expired interrupt unmasked hence it is set in - * pm_intrmsk_mbz. - * - * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will - * result in the register bit being left SET! 
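The deleted comment above describes doubly inverted bookkeeping: PMINTRMSK bits disable interrupts when set, while pm_intrmsk_mbz lists bits that must never be set, i.e. interrupts that have to stay enabled for the GuC. One plausible reading of how such a write gets sanitized (a sketch, not the i915 helper):

#include <stdint.h>

/* Bits requested to be disabled are filtered against the must-be-zero
 * mask, so anything the GuC depends on stays enabled no matter what the
 * caller asked for. */
static uint32_t sanitize_pmintrmsk(uint32_t wanted_disabled, uint32_t mbz)
{
	return wanted_disabled & ~mbz;
}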
- */ - rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; - rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; + /* Don't handle the ctx switch interrupt in GuC submission mode */ + intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0); + intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0); } static void guc_interrupts_release(struct intel_gt *gt) { - struct intel_rps *rps = >->i915->gt_pm.rps; struct intel_uncore *uncore = gt->uncore; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int irqs; + u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT; + u32 dmask = irqs << 16 | irqs; - /* - * tell all command streamers NOT to forward interrupts or vblank - * to GuC. - */ - irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); - irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, gt, id) - ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); - - /* route all GT interrupts to the host */ - intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0); - intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0); - intel_uncore_write(uncore, GUC_WD_VECS_IER, 0); - - rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; + GEM_BUG_ON(INTEL_GEN(gt->i915) < 11); + + /* Handle ctx switch interrupts again */ + intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask); + intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask); } static void guc_set_default_submission(struct intel_engine_cs *engine) @@ -1101,11 +599,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) engine->park = engine->unpark = NULL; engine->reset.prepare = guc_reset_prepare; - engine->reset.reset = guc_reset; + engine->reset.rewind = guc_reset_rewind; + engine->reset.cancel = guc_reset_cancel; engine->reset.finish = guc_reset_finish; - engine->cancel_requests = guc_cancel_requests; - engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; @@ -1118,16 +615,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) GEM_BUG_ON(engine->irq_enable || engine->irq_disable); } -int intel_guc_submission_enable(struct intel_guc *guc) +void intel_guc_submission_enable(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); struct intel_engine_cs *engine; enum intel_engine_id id; - int err; - - err = i915_inject_load_error(gt->i915, -ENXIO); - if (err) - return err; /* * We're using GuC work items for submitting work through GuC. 
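In guc_interrupts_capture()/guc_interrupts_release(), dmask duplicates the interrupt bits into both 16-bit halves of the register; these Gen11 enable registers appear to carry two engines' bits side by side (render/copy in one, VCS/VECS in the other), so a single read-modify-write updates both. A one-line sketch of the packing (the two-engines-per-register layout is a reading of the code, not stated in the patch):

#include <stdint.h>

static uint32_t both_halves(uint16_t irqs)
{
	return ((uint32_t)irqs << 16) | irqs;	/* upper engine | lower engine */
}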
Since @@ -1142,11 +634,8 @@ int intel_guc_submission_enable(struct intel_guc *guc) sizeof(struct guc_wq_item) * I915_NUM_ENGINES > GUC_WQ_SIZE); - GEM_BUG_ON(!guc->execbuf_client); - - err = guc_clients_enable(guc); - if (err) - return err; + guc_proc_desc_init(guc); + guc_stage_desc_init(guc); /* Take over from manual control of ELSP (execlists) */ guc_interrupts_capture(gt); @@ -1155,8 +644,6 @@ int intel_guc_submission_enable(struct intel_guc *guc) engine->set_default_submission = guc_set_default_submission; engine->set_default_submission(engine); } - - return 0; } void intel_guc_submission_disable(struct intel_guc *guc) @@ -1165,8 +652,12 @@ void intel_guc_submission_disable(struct intel_guc *guc) GEM_BUG_ON(gt->awake); /* GT should be parked first */ + /* Note: By the time we're here, GuC may have already been reset */ + guc_interrupts_release(gt); - guc_clients_disable(guc); + + guc_stage_desc_fini(guc); + guc_proc_desc_fini(guc); } static bool __guc_submission_support(struct intel_guc *guc) @@ -1185,6 +676,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc) guc->submission_supported = __guc_submission_support(guc); } -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftest_guc.c" -#endif +bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine) +{ + return engine->set_default_submission == guc_set_default_submission; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 54d716828352..e402a2932592 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -6,62 +6,18 @@ #ifndef _INTEL_GUC_SUBMISSION_H_ #define _INTEL_GUC_SUBMISSION_H_ -#include <linux/spinlock.h> +#include <linux/types.h> -#include "gt/intel_engine_types.h" - -#include "i915_gem.h" -#include "i915_selftest.h" - -struct drm_i915_private; - -/* - * This structure primarily describes the GEM object shared with the GuC. - * The specs sometimes refer to this object as a "GuC context", but we use - * the term "client" to avoid confusion with hardware contexts. This - * GEM object is held for the entire lifetime of our interaction with - * the GuC, being allocated before the GuC is loaded with its firmware. - * Because there's no way to update the address used by the GuC after - * initialisation, the shared object must stay pinned into the GGTT as - * long as the GuC is in use. We also keep the first page (only) mapped - * into kernel address space, as it includes shared data that must be - * updated on every request submission. - * - * The single GEM object described here is actually made up of several - * separate areas, as far as the GuC is concerned. The first page (kept - * kmap'd) includes the "process descriptor" which holds sequence data for - * the doorbell, and one cacheline which actually *is* the doorbell; a - * write to this will "ring the doorbell" (i.e. send an interrupt to the - * GuC). The subsequent pages of the client object constitute the work - * queue (a circular array of work items), again described in the process - * descriptor. Work queue pages are mapped momentarily as required. 
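The object layout this deleted comment describes, one permanently mapped page holding the process descriptor plus a single doorbell cacheline, followed by two pages of circular work queue, can be pictured as a flat struct. A sketch under those assumptions (sizes from the comment, names invented):

#include <stdint.h>

#define PAGE_SZ	4096u

struct guc_client_layout {
	uint8_t db_and_proc_desc[PAGE_SZ];	/* kept kmap'd; the doorbell is
						 * one cacheline and the process
						 * descriptor uses the half of
						 * the page it does not occupy */
	uint8_t wq[2 * PAGE_SZ];		/* circular work-item buffer */
};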
- */ -struct intel_guc_client { - struct i915_vma *vma; - void *vaddr; - struct intel_guc *guc; - - /* bitmap of (host) engine ids */ - u32 priority; - u32 stage_id; - u32 proc_desc_offset; - - u16 doorbell_id; - unsigned long doorbell_offset; - - /* Protects GuC client's WQ access */ - spinlock_t wq_lock; - - /* For testing purposes, use nop WQ items instead of real ones */ - I915_SELFTEST_DECLARE(bool use_nop_wqi); -}; +struct intel_guc; +struct intel_engine_cs; void intel_guc_submission_init_early(struct intel_guc *guc); int intel_guc_submission_init(struct intel_guc *guc); -int intel_guc_submission_enable(struct intel_guc *guc); +void intel_guc_submission_enable(struct intel_guc *guc); void intel_guc_submission_disable(struct intel_guc *guc); void intel_guc_submission_fini(struct intel_guc *guc); int intel_guc_preempt_work_create(struct intel_guc *guc); void intel_guc_preempt_work_destroy(struct intel_guc *guc); +bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 8be515c8d0f0..32a069841c14 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -63,7 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) void *vaddr; int err; - err = i915_inject_load_error(gt->i915, -ENXIO); + err = i915_inject_probe_error(gt->i915, -ENXIO); if (err) return err; @@ -161,7 +161,7 @@ int intel_huc_auth(struct intel_huc *huc) if (!intel_uc_fw_is_loaded(&huc->fw)) return -ENOEXEC; - ret = i915_inject_load_error(gt->i915, -ENXIO); + ret = i915_inject_probe_error(gt->i915, -ENXIO); if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index d654340d4d03..eee193bf2cc4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -39,5 +39,5 @@ void intel_huc_fw_init_early(struct intel_huc *huc) int intel_huc_fw_upload(struct intel_huc *huc) { /* HW doesn't look at destination address for HuC, so set it to 0 */ - return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL); + return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3fdbc935d155..3ffc6267f96e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -20,7 +20,7 @@ static int __intel_uc_reset_hw(struct intel_uc *uc) int ret; u32 guc_status; - ret = i915_inject_load_error(gt->i915, -ENXIO); + ret = i915_inject_probe_error(gt->i915, -ENXIO); if (ret) return ret; @@ -123,6 +123,11 @@ static void __uc_free_load_err_log(struct intel_uc *uc) i915_gem_object_put(log); } +static inline bool guc_communication_enabled(struct intel_guc *guc) +{ + return intel_guc_ct_enabled(&guc->ct); +} + /* * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 * register using the same bits used in the CT message payload. 
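While the CT buffers are down, events are latched into SCRATCH_15 as described above, and the handler later drains and acknowledges them. A minimal read-and-clear sketch of that pattern (illustrative; the real driver does this under its interrupt lock):

#include <stdint.h>

static uint32_t consume_mmio_msg(volatile uint32_t *scratch, uint32_t enabled)
{
	uint32_t msg = *scratch & enabled;

	*scratch &= ~msg;	/* ack exactly the bits we captured */
	return msg;
}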
Since our @@ -158,7 +163,7 @@ static void guc_handle_mmio_msg(struct intel_guc *guc) struct drm_i915_private *i915 = guc_to_gt(guc)->i915; /* we need communication to be enabled to reply to GuC */ - GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); + GEM_BUG_ON(!guc_communication_enabled(guc)); if (!guc->mmio_msg) return; @@ -185,11 +190,6 @@ static void guc_disable_interrupts(struct intel_guc *guc) guc->interrupts.disable(guc); } -static inline bool guc_communication_enabled(struct intel_guc *guc) -{ - return guc->send != intel_guc_send_nop; -} - static int guc_enable_communication(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_gt(guc)->i915; @@ -197,7 +197,7 @@ static int guc_enable_communication(struct intel_guc *guc) GEM_BUG_ON(guc_communication_enabled(guc)); - ret = i915_inject_load_error(i915, -ENXIO); + ret = i915_inject_probe_error(i915, -ENXIO); if (ret) return ret; @@ -205,9 +205,6 @@ static int guc_enable_communication(struct intel_guc *guc) if (ret) return ret; - guc->send = intel_guc_send_ct; - guc->handler = intel_guc_to_host_event_handler_ct; - /* check for mmio messages received before/during the CT enable */ guc_get_mmio_msg(guc); guc_handle_mmio_msg(guc); @@ -216,7 +213,7 @@ static int guc_enable_communication(struct intel_guc *guc) /* check for CT messages received before we enabled interrupts */ spin_lock_irq(&i915->irq_lock); - intel_guc_to_host_event_handler_ct(guc); + intel_guc_ct_event_handler(&guc->ct); spin_unlock_irq(&i915->irq_lock); DRM_INFO("GuC communication enabled\n"); @@ -224,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc) return 0; } -static void __guc_stop_communication(struct intel_guc *guc) +static void guc_disable_communication(struct intel_guc *guc) { /* * Events generated during or after CT disable are logged by guc in @@ -235,23 +232,6 @@ static void __guc_stop_communication(struct intel_guc *guc) guc_disable_interrupts(guc); - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; -} - -static void guc_stop_communication(struct intel_guc *guc) -{ - intel_guc_ct_stop(&guc->ct); - - __guc_stop_communication(guc); - - DRM_INFO("GuC communication stopped\n"); -} - -static void guc_disable_communication(struct intel_guc *guc) -{ - __guc_stop_communication(guc); - intel_guc_ct_disable(&guc->ct); /* @@ -267,28 +247,22 @@ static void guc_disable_communication(struct intel_guc *guc) void intel_uc_fetch_firmwares(struct intel_uc *uc) { - struct drm_i915_private *i915 = uc_to_gt(uc)->i915; int err; if (!intel_uc_uses_guc(uc)) return; - err = intel_uc_fw_fetch(&uc->guc.fw, i915); + err = intel_uc_fw_fetch(&uc->guc.fw); if (err) return; if (intel_uc_uses_huc(uc)) - intel_uc_fw_fetch(&uc->huc.fw, i915); + intel_uc_fw_fetch(&uc->huc.fw); } void intel_uc_cleanup_firmwares(struct intel_uc *uc) { - if (!intel_uc_uses_guc(uc)) - return; - - if (intel_uc_uses_huc(uc)) - intel_uc_fw_cleanup_fetch(&uc->huc.fw); - + intel_uc_fw_cleanup_fetch(&uc->huc.fw); intel_uc_fw_cleanup_fetch(&uc->guc.fw); } @@ -316,15 +290,8 @@ void intel_uc_init(struct intel_uc *uc) void intel_uc_fini(struct intel_uc *uc) { - struct intel_guc *guc = &uc->guc; - - if (!intel_uc_uses_guc(uc)) - return; - - if (intel_uc_uses_huc(uc)) - intel_huc_fini(&uc->huc); - - intel_guc_fini(guc); + intel_huc_fini(&uc->huc); + intel_guc_fini(&uc->guc); __uc_free_load_err_log(uc); } @@ -372,7 +339,7 @@ static int uc_init_wopcm(struct intel_uc *uc) GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); GEM_BUG_ON(size & 
~GUC_WOPCM_SIZE_MASK); - err = i915_inject_load_error(gt->i915, -ENXIO); + err = i915_inject_probe_error(gt->i915, -ENXIO); if (err) return err; @@ -486,11 +453,8 @@ int intel_uc_init_hw(struct intel_uc *uc) if (ret) goto err_communication; - if (intel_uc_supports_guc_submission(uc)) { - ret = intel_guc_submission_enable(guc); - if (ret) - goto err_communication; - } + if (intel_uc_supports_guc_submission(uc)) + intel_guc_submission_enable(guc); dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path, @@ -560,7 +524,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc) if (!intel_guc_is_running(guc)) return; - guc_stop_communication(guc); + guc_disable_communication(guc); __uc_sanitize(uc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index bb4889d2346d..8ee0a0c7f447 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -11,7 +11,6 @@ #include "intel_uc_fw_abi.h" #include "i915_drv.h" -#ifdef CONFIG_DRM_I915_DEBUG_GUC static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw) { GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); @@ -22,6 +21,7 @@ static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw) return container_of(uc_fw, struct intel_gt, uc.huc.fw); } +#ifdef CONFIG_DRM_I915_DEBUG_GUC void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, enum intel_uc_fw_status status) { @@ -37,8 +37,13 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, /* * List of required GuC and HuC binaries per-platform. * Must be ordered based on platform + revid, from newer to older. + * + * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas + * between 33.0 and 35.2 are only related to new additions to support new Gen12 + * features. 
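The ordering rule stated above (newer platform + revid first) matters because selection is a linear scan over the list below that stops at the first usable entry. A sketch of such a scan (field names are placeholders, not the i915 structures):

#include <stddef.h>

struct fw_blob { int platform; int min_rev; const char *path; };

static const char *select_fw(const struct fw_blob *tbl, size_t n,
			     int platform, int rev)
{
	/* Newest-first ordering means the first hit is the best match. */
	for (size_t i = 0; i < n; i++)
		if (tbl[i].platform == platform && rev >= tbl[i].min_rev)
			return tbl[i].path;

	return NULL;	/* no firmware defined for this device */
}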
*/ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ @@ -214,35 +219,36 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED); } -static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, - struct drm_i915_private *i915, - int e) +static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e) { + struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915; bool user = e == -EINVAL; - if (i915_inject_load_error(i915, e)) { + if (i915_inject_probe_error(i915, e)) { /* non-existing blob */ uc_fw->path = "<invalid>"; uc_fw->user_overridden = user; - } else if (i915_inject_load_error(i915, e)) { + } else if (i915_inject_probe_error(i915, e)) { /* require next major version */ uc_fw->major_ver_wanted += 1; uc_fw->minor_ver_wanted = 0; uc_fw->user_overridden = user; - } else if (i915_inject_load_error(i915, e)) { + } else if (i915_inject_probe_error(i915, e)) { /* require next minor version */ uc_fw->minor_ver_wanted += 1; uc_fw->user_overridden = user; - } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) { + } else if (uc_fw->major_ver_wanted && + i915_inject_probe_error(i915, e)) { /* require prev major version */ uc_fw->major_ver_wanted -= 1; uc_fw->minor_ver_wanted = 0; uc_fw->user_overridden = user; - } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) { + } else if (uc_fw->minor_ver_wanted && + i915_inject_probe_error(i915, e)) { /* require prev minor version - hey, this should work! */ uc_fw->minor_ver_wanted -= 1; uc_fw->user_overridden = user; - } else if (user && i915_inject_load_error(i915, e)) { + } else if (user && i915_inject_probe_error(i915, e)) { /* officially unsupported platform */ uc_fw->major_ver_wanted = 0; uc_fw->minor_ver_wanted = 0; @@ -253,14 +259,14 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, /** * intel_uc_fw_fetch - fetch uC firmware * @uc_fw: uC firmware - * @i915: device private * * Fetch uC firmware into GEM obj. * * Return: 0 on success, a negative errno code on failure. 
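The chain of i915_inject_probe_error() checks in __force_fw_fetch_failures() works because the injection helper fires at most once, when an internal countdown selected by a module parameter reaches zero, so each call site becomes an individually addressable failure point. A toy version of that mechanism (a reconstruction, not the i915 implementation):

static int fail_at;	/* 0 = off; N = fail at the Nth injection point */

static int inject_error(int err)
{
	if (fail_at && --fail_at == 0)
		return err;	/* trigger exactly one failure site */

	return 0;		/* all other sites proceed normally */
}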
*/ -int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) +int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) { + struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915; struct device *dev = i915->drm.dev; struct drm_i915_gem_object *obj; const struct firmware *fw = NULL; @@ -271,12 +277,12 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) GEM_BUG_ON(!i915->wopcm.size); GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw)); - err = i915_inject_load_error(i915, -ENXIO); + err = i915_inject_probe_error(i915, -ENXIO); if (err) return err; - __force_fw_fetch_failures(uc_fw, i915, -EINVAL); - __force_fw_fetch_failures(uc_fw, i915, -ESTALE); + __force_fw_fetch_failures(uc_fw, -EINVAL); + __force_fw_fetch_failures(uc_fw, -ESTALE); err = request_firmware(&fw, uc_fw->path, dev); if (err) @@ -383,8 +389,9 @@ fail: return err; } -static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) +static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw) { + struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; struct drm_mm_node *node = &ggtt->uc_fw; GEM_BUG_ON(!drm_mm_node_allocated(node)); @@ -394,13 +401,12 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) return lower_32_bits(node->start); } -static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw, - struct intel_gt *gt) +static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw) { struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = gt->ggtt; + struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; struct i915_vma dummy = { - .node.start = uc_fw_ggtt_offset(uc_fw, ggtt), + .node.start = uc_fw_ggtt_offset(uc_fw), .node.size = obj->base.size, .pages = obj->mm.pages, .vm = &ggtt->vm, @@ -415,37 +421,36 @@ static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw, ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); } -static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw, - struct intel_gt *gt) +static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw) { struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = gt->ggtt; - u64 start = uc_fw_ggtt_offset(uc_fw, ggtt); + struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; + u64 start = uc_fw_ggtt_offset(uc_fw); ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); } -static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, - u32 wopcm_offset, u32 dma_flags) +static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) { + struct intel_gt *gt = __uc_fw_to_gt(uc_fw); struct intel_uncore *uncore = gt->uncore; u64 offset; int ret; - ret = i915_inject_load_error(gt->i915, -ETIMEDOUT); + ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT); if (ret) return ret; intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Set the source address for the uCode */ - offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt); + offset = uc_fw_ggtt_offset(uc_fw); GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000); intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset)); /* Set the DMA destination */ - intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset); + intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset); intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); /* @@ -477,23 +482,22 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, /** * intel_uc_fw_upload - load uC firmware using custom loader * @uc_fw: uC firmware - * @gt: 
the intel_gt structure - * @wopcm_offset: destination offset in wopcm + * @dst_offset: destination offset * @dma_flags: flags for flags for dma ctrl * * Loads uC firmware and updates internal flags. * * Return: 0 on success, non-zero on failure. */ -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, - u32 wopcm_offset, u32 dma_flags) +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) { + struct intel_gt *gt = __uc_fw_to_gt(uc_fw); int err; /* make sure the status was cleared the last time we reset the uc */ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); - err = i915_inject_load_error(gt->i915, -ENOEXEC); + err = i915_inject_probe_error(gt->i915, -ENOEXEC); if (err) return err; @@ -501,9 +505,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, return -ENOEXEC; /* Call custom loader */ - intel_uc_fw_ggtt_bind(uc_fw, gt); - err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags); - intel_uc_fw_ggtt_unbind(uc_fw, gt); + uc_fw_bind_ggtt(uc_fw); + err = uc_fw_xfer(uc_fw, dst_offset, dma_flags); + uc_fw_unbind_ggtt(uc_fw); if (err) goto fail; @@ -540,10 +544,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw) void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) { - if (!intel_uc_fw_is_available(uc_fw)) - return; - - i915_gem_object_unpin_pages(uc_fw->obj); + intel_uc_fw_cleanup_fetch(uc_fw); } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 7a0a5989afc9..1f30543d0d2d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -229,10 +229,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type, bool supported, enum intel_platform platform, u8 rev); -int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915); +int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, - u32 wopcm_offset, u32 dma_flags); +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags); int intel_uc_fw_init(struct intel_uc_fw *uc_fw); void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c deleted file mode 100644 index d8a80388bd31..000000000000 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ /dev/null @@ -1,299 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2017 Intel Corporation - */ - -#include "i915_selftest.h" -#include "gem/i915_gem_pm.h" - -/* max doorbell number + negative test for each client type */ -#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM) - -static struct intel_guc_client *clients[ATTEMPTS]; - -static bool available_dbs(struct intel_guc *guc, u32 priority) -{ - unsigned long offset; - unsigned long end; - u16 id; - - /* first half is used for normal priority, second half for high */ - offset = 0; - end = GUC_NUM_DOORBELLS / 2; - if (priority <= GUC_CLIENT_PRIORITY_HIGH) { - offset = end; - end += offset; - } - - id = find_next_zero_bit(guc->doorbell_bitmap, end, offset); - if (id < end) - return true; - - return false; -} - -static int check_all_doorbells(struct intel_guc *guc) -{ - u16 db_id; - - pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS); - for (db_id = 
0; db_id < GUC_NUM_DOORBELLS; ++db_id) { - if (!doorbell_ok(guc, db_id)) { - pr_err("doorbell %d, not ok\n", db_id); - return -EIO; - } - } - - return 0; -} - -static int ring_doorbell_nop(struct intel_guc_client *client) -{ - struct guc_process_desc *desc = __get_process_desc(client); - int err; - - client->use_nop_wqi = true; - - spin_lock_irq(&client->wq_lock); - - guc_wq_item_append(client, 0, 0, 0, 0); - guc_ring_doorbell(client); - - spin_unlock_irq(&client->wq_lock); - - client->use_nop_wqi = false; - - /* if there are no issues GuC will update the WQ head and keep the - * WQ in active status - */ - err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10); - if (err) { - pr_err("doorbell %u ring failed!\n", client->doorbell_id); - return -EIO; - } - - if (desc->wq_status != WQ_STATUS_ACTIVE) { - pr_err("doorbell %u ring put WQ in bad state (%u)!\n", - client->doorbell_id, desc->wq_status); - return -EIO; - } - - return 0; -} - -/* - * Basic client sanity check, handy to validate create_clients. - */ -static int validate_client(struct intel_guc_client *client, int client_priority) -{ - if (client->priority != client_priority || - client->doorbell_id == GUC_DOORBELL_INVALID) - return -EINVAL; - else - return 0; -} - -static bool client_doorbell_in_sync(struct intel_guc_client *client) -{ - return !client || doorbell_ok(client->guc, client->doorbell_id); -} - -/* - * Check that we're able to synchronize guc_clients with their doorbells - * - * We're creating clients and reserving doorbells once, at module load. During - * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to - * GuC being reset. In other words - GuC clients are still around, but the - * status of their doorbells may be incorrect. This is the reason behind - * validating that the doorbells status expected by the driver matches what the - * GuC/HW have. - */ -static int igt_guc_clients(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_guc *guc = >->uc.guc; - intel_wakeref_t wakeref; - int err = 0; - - GEM_BUG_ON(!HAS_GT_UC(gt->i915)); - wakeref = intel_runtime_pm_get(gt->uncore->rpm); - - err = check_all_doorbells(guc); - if (err) - goto unlock; - - /* - * Get rid of clients created during driver load because the test will - * recreate them. - */ - guc_clients_disable(guc); - guc_clients_destroy(guc); - if (guc->execbuf_client) { - pr_err("guc_clients_destroy lied!\n"); - err = -EINVAL; - goto unlock; - } - - err = guc_clients_create(guc); - if (err) { - pr_err("Failed to create clients\n"); - goto unlock; - } - GEM_BUG_ON(!guc->execbuf_client); - - err = validate_client(guc->execbuf_client, - GUC_CLIENT_PRIORITY_KMD_NORMAL); - if (err) { - pr_err("execbug client validation failed\n"); - goto out; - } - - /* the client should now have reserved a doorbell */ - if (!has_doorbell(guc->execbuf_client)) { - pr_err("guc_clients_create didn't reserve doorbells\n"); - err = -EINVAL; - goto out; - } - - /* Now enable the clients */ - guc_clients_enable(guc); - - /* each client should now have received a doorbell */ - if (!client_doorbell_in_sync(guc->execbuf_client)) { - pr_err("failed to initialize the doorbells\n"); - err = -EINVAL; - goto out; - } - - /* - * Basic test - an attempt to reallocate a valid doorbell to the - * client it is currently assigned should not cause a failure. - */ - err = create_doorbell(guc->execbuf_client); - -out: - /* - * Leave clean state for other test, plus the driver always destroy the - * clients during unload. 
- */ - guc_clients_disable(guc); - guc_clients_destroy(guc); - guc_clients_create(guc); - guc_clients_enable(guc); -unlock: - intel_runtime_pm_put(gt->uncore->rpm, wakeref); - return err; -} - -/* - * Create as many clients as number of doorbells. Note that there's already - * client(s)/doorbell(s) created during driver load, but this test creates - * its own and do not interact with the existing ones. - */ -static int igt_guc_doorbells(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_guc *guc = >->uc.guc; - intel_wakeref_t wakeref; - int i, err = 0; - u16 db_id; - - GEM_BUG_ON(!HAS_GT_UC(gt->i915)); - wakeref = intel_runtime_pm_get(gt->uncore->rpm); - - err = check_all_doorbells(guc); - if (err) - goto unlock; - - for (i = 0; i < ATTEMPTS; i++) { - clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM); - - if (!clients[i]) { - pr_err("[%d] No guc client\n", i); - err = -EINVAL; - goto out; - } - - if (IS_ERR(clients[i])) { - if (PTR_ERR(clients[i]) != -ENOSPC) { - pr_err("[%d] unexpected error\n", i); - err = PTR_ERR(clients[i]); - goto out; - } - - if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) { - pr_err("[%d] non-db related alloc fail\n", i); - err = -EINVAL; - goto out; - } - - /* expected, ran out of dbs for this client type */ - continue; - } - - /* - * The check below is only valid because we keep a doorbell - * assigned during the whole life of the client. - */ - if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) { - pr_err("[%d] more clients than doorbells (%d >= %d)\n", - i, clients[i]->stage_id, GUC_NUM_DOORBELLS); - err = -EINVAL; - goto out; - } - - err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM); - if (err) { - pr_err("[%d] client_alloc sanity check failed!\n", i); - err = -EINVAL; - goto out; - } - - db_id = clients[i]->doorbell_id; - - err = __guc_client_enable(clients[i]); - if (err) { - pr_err("[%d] Failed to create a doorbell\n", i); - goto out; - } - - /* doorbell id shouldn't change, we are holding the mutex */ - if (db_id != clients[i]->doorbell_id) { - pr_err("[%d] doorbell id changed (%d != %d)\n", - i, db_id, clients[i]->doorbell_id); - err = -EINVAL; - goto out; - } - - err = check_all_doorbells(guc); - if (err) - goto out; - - err = ring_doorbell_nop(clients[i]); - if (err) - goto out; - } - -out: - for (i = 0; i < ATTEMPTS; i++) - if (!IS_ERR_OR_NULL(clients[i])) { - __guc_client_disable(clients[i]); - guc_client_free(clients[i]); - } -unlock: - intel_runtime_pm_put(gt->uncore->rpm, wakeref); - return err; -} - -int intel_guc_live_selftest(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_guc_clients), - SUBTEST(igt_guc_doorbells), - }; - - if (!USES_GUC_SUBMISSION(i915)) - return 0; - - return intel_gt_live_subtests(tests, &i915->gt); -} diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index e753b1e706e2..21a176cd8acc 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -35,7 +35,9 @@ */ #include <linux/slab.h> + #include "i915_drv.h" +#include "gt/intel_ring.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" @@ -1597,9 +1599,9 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s) if (!(cmd_val(s, 0) & (1 << 22))) return ret; - /* check if QWORD */ - if (DWORD_FIELD(0, 20, 19) == 1) - valid_len += 8; + /* check inline data */ + if (cmd_val(s, 0) & BIT(18)) + valid_len = CMD_LEN(9); ret = gvt_check_valid_cmd_length(cmd_length(s), valid_len); if (ret) diff --git 
a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 286703643002..ab25d151932a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -38,6 +38,10 @@ #define GVT_CMD_HASH_BITS 7 +struct intel_gvt; +struct intel_shadow_wa_ctx; +struct intel_vgpu_workload; + void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt); int intel_gvt_init_cmd_parser(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index a87f33e6a23c..b59b34046e1e 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -35,6 +35,11 @@ #ifndef _GVT_DISPLAY_H_ #define _GVT_DISPLAY_H_ +#include <linux/types.h> + +struct intel_gvt; +struct intel_vgpu; + #define SBI_REG_MAX 20 #define DPCD_SIZE 0x700 diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 13044c027f27..e451298d11c3 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -152,6 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = { static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, struct intel_vgpu_fb_info *info) { + static struct lock_class_key lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; @@ -161,7 +162,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); - i915_gem_object_init(obj, &intel_vgpu_gem_ops); + i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; @@ -498,8 +499,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) goto out_free_gem; } - i915_gem_object_put(obj); - ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR); if (ret < 0) { gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret); @@ -524,6 +523,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) file_count(dmabuf->file), kref_read(&obj->base.refcount)); + i915_gem_object_put(obj); + return dmabuf_fd; out_free_dmabuf: diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index f6dfc8b795ec..dfe0cbc6aad8 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -35,6 +35,10 @@ #ifndef _GVT_EDID_H_ #define _GVT_EDID_H_ +#include <linux/types.h> + +struct intel_vgpu; + #define EDID_SIZE 128 #define EDID_ADDR 0x50 /* Linux hvm EDID addr */ diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 5ccc2c695848..5c0c1fd30c83 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -35,6 +35,8 @@ #ifndef _GVT_EXECLIST_H_ #define _GVT_EXECLIST_H_ +#include <linux/types.h> + struct execlist_ctx_descriptor_format { union { u32 ldw; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index 60c155085029..67b6ede9e707 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -36,6 +36,8 @@ #ifndef _GVT_FB_DECODER_H_ #define _GVT_FB_DECODER_H_ +#include <linux/types.h> + #define _PLANE_CTL_FORMAT_SHIFT 24 #define _PLANE_CTL_TILED_SHIFT 10 #define _PIPE_V_SRCSZ_SHIFT 0 diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 4b04af569c05..34cb404ba4b7 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ 
-1282,7 +1282,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, return -EINVAL; default: GEM_BUG_ON(1); - }; + } /* direct shadow */ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 45a9124e53b6..bb9fe6bf5275 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -460,6 +460,7 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static i915_reg_t force_nonpriv_white_list[] = { GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) + PS_INVOCATION_COUNT,//_MMIO(0x2348) GEN8_CS_CHICKEN1,//_MMIO(0x2580) _MMIO(0x2690), _MMIO(0x2694), @@ -508,7 +509,7 @@ static inline bool in_whitelist(unsigned int reg) static int force_nonpriv_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - u32 reg_nonpriv = *(u32 *)p_data; + u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2); int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); u32 ring_base; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; @@ -528,7 +529,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, bytes); } else gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n", - vgpu->id, reg_nonpriv, offset); + vgpu->id, *(u32 *)p_data, offset); return 0; } @@ -819,13 +820,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; enum intel_gvt_event_type event; - if (reg == _DPA_AUX_CH_CTL) + if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) event = AUX_CHANNEL_A; - else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL) + else if (reg == _PCH_DPB_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B))) event = AUX_CHANNEL_B; - else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL) + else if (reg == _PCH_DPC_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C))) event = AUX_CHANNEL_C; - else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL) + else if (reg == _PCH_DPD_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D))) event = AUX_CHANNEL_D; else { WARN_ON(true); @@ -2872,11 +2876,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); - MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); @@ -3417,6 +3421,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, } for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { + /* pvinfo data doesn't come from hw mmio */ + if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE) + continue; + for (j = 0; j < block->size; j += 4) { ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 4862fb12778e..9599c0a762b2 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ 
b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -33,6 +33,10 @@ #ifndef _GVT_HYPERCALL_H_ #define _GVT_HYPERCALL_H_ +#include <linux/types.h> + +struct device; + enum hypervisor_type { INTEL_GVT_HYPERVISOR_XEN = 0, INTEL_GVT_HYPERVISOR_KVM, diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 5313fb1b33e1..fcd663811d37 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -32,6 +32,8 @@ #ifndef _GVT_INTERRUPT_H_ #define _GVT_INTERRUPT_H_ +#include <linux/types.h> + enum intel_gvt_event_type { RCS_MI_USER_INTERRUPT = 0, RCS_DEBUG, @@ -135,6 +137,7 @@ enum intel_gvt_event_type { struct intel_gvt_irq; struct intel_gvt; +struct intel_vgpu; typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq, enum intel_gvt_event_type event, struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 5874f1cb4306..2e68f4b02c94 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -36,6 +36,8 @@ #ifndef _GVT_MMIO_H_ #define _GVT_MMIO_H_ +#include <linux/types.h> + struct intel_gvt; struct intel_vgpu; diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 4208e40445b1..aaf15916d29a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -35,6 +35,7 @@ #include "i915_drv.h" #include "gt/intel_context.h" +#include "gt/intel_ring.h" #include "gvt.h" #include "trace.h" diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h index fa607a71c3c0..f6eb7135583c 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.h +++ b/drivers/gpu/drm/i915/gvt/page_track.h @@ -25,6 +25,9 @@ #ifndef _GVT_PAGE_TRACK_H_ #define _GVT_PAGE_TRACK_H_ +#include <linux/types.h> + +struct intel_vgpu; struct intel_vgpu_page_track; typedef int (*gvt_page_track_handler_t)( diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h index 7b59e3e88b8b..3dacdad5f529 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.h +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h @@ -34,6 +34,9 @@ #ifndef __GVT_SCHED_POLICY__ #define __GVT_SCHED_POLICY__ +struct intel_gvt; +struct intel_vgpu; + struct intel_gvt_sched_policy_ops { int (*init)(struct intel_gvt *gvt); void (*clean)(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 36bb7639e82f..b3299f88e24e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -35,11 +35,12 @@ #include <linux/kthread.h> -#include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_ring.h" #include "i915_drv.h" +#include "i915_gem_gtt.h" #include "gvt.h" #define RING_CTX_OFF(x) \ @@ -58,7 +59,7 @@ static void set_context_pdp_root_pointer( static void update_shadow_pdps(struct intel_vgpu_workload *workload) { struct drm_i915_gem_object *ctx_obj = - workload->req->hw_context->state->obj; + workload->req->context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; @@ -129,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct intel_gvt *gvt = vgpu->gvt; int ring_id = workload->ring_id; struct drm_i915_gem_object *ctx_obj = - workload->req->hw_context->state->obj; + workload->req->context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *dst; @@ 
-194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + page = i915_gem_object_get_page(ctx_obj, i); dst = kmap(page); intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, I915_GTT_PAGE_SIZE); @@ -204,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return 0; } -static inline bool is_gvt_request(struct i915_request *req) +static inline bool is_gvt_request(struct i915_request *rq) { - return i915_gem_context_force_single_submission(req->gem_context); + return intel_context_force_single_submission(rq->context); } static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) @@ -306,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) u32 *cs; int err; - if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context)) + if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context)) intel_vgpu_restore_inhibit_context(vgpu, req); /* @@ -362,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) } static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, - struct i915_gem_context *ctx) + struct intel_context *ce) { struct intel_vgpu_mm *mm = workload->shadow_mm; - struct i915_ppgtt *ppgtt = - i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx)); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); int i = 0; if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { @@ -379,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } - - i915_vm_put(&ppgtt->vm); } static int @@ -528,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); struct i915_request *rq = workload->req; struct execlist_ring_context *shadow_ring_context = - (struct execlist_ring_context *)rq->hw_context->lrc_reg_state; + (struct execlist_ring_context *)rq->context->lrc_reg_state; shadow_ring_context->bb_per_ctx_ptr.val = (shadow_ring_context->bb_per_ctx_ptr.val & @@ -627,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload) update_shadow_pdps(workload); - set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); + set_context_ppgtt_from_shadow(workload, s->shadow[ring]); ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { @@ -786,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) struct i915_request *rq = workload->req; struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj; + struct drm_i915_gem_object *ctx_obj = rq->context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *src; @@ -834,7 +832,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) return; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + page = i915_gem_object_get_page(ctx_obj, i); src = kmap(page); intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, I915_GTT_PAGE_SIZE); @@ -1222,18 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; - struct i915_gem_context *ctx; struct i915_ppgtt *ppgtt; enum intel_engine_id i; int ret; - ctx = 
i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + ppgtt = i915_ppgtt_create(i915); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); - i915_gem_context_set_force_single_submission(ctx); - - ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx)); i915_context_ppgtt_root_save(s, ppgtt); for_each_engine(engine, i915, i) { @@ -1242,12 +1236,16 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&s->workload_q_head[i]); s->shadow[i] = ERR_PTR(-EINVAL); - ce = intel_context_create(ctx, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); goto out_shadow_ctx; } + i915_vm_put(ce->vm); + ce->vm = i915_vm_get(&ppgtt->vm); + intel_context_set_single_submission(ce); + if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */ const unsigned int ring_size = 512 * SZ_4K; @@ -1280,7 +1278,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES); i915_vm_put(&ppgtt->vm); - i915_gem_context_put(ctx); return 0; out_shadow_ctx: @@ -1293,7 +1290,6 @@ out_shadow_ctx: intel_context_put(s->shadow[i]); } i915_vm_put(&ppgtt->vm); - i915_gem_context_put(ctx); return ret; } @@ -1584,9 +1580,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, */ if (list_empty(workload_q_head(vgpu, ring_id))) { intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&vgpu->vgpu_lock); ret = intel_gvt_scan_and_shadow_workload(workload); - mutex_unlock(&vgpu->vgpu_lock); intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 7927b1a0c7a6..cfe09964622b 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -6,7 +6,9 @@ #include <linux/debugobjects.h> +#include "gt/intel_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_active.h" @@ -90,14 +92,14 @@ static void debug_active_init(struct i915_active *ref) static void debug_active_activate(struct i915_active *ref) { - lockdep_assert_held(&ref->mutex); + lockdep_assert_held(&ref->tree_lock); if (!atomic_read(&ref->count)) /* before the first inc */ debug_object_activate(ref, &active_debug_desc); } static void debug_active_deactivate(struct i915_active *ref) { - lockdep_assert_held(&ref->mutex); + lockdep_assert_held(&ref->tree_lock); if (!atomic_read(&ref->count)) /* after the last dec */ debug_object_deactivate(ref, &active_debug_desc); } @@ -127,29 +129,22 @@ __active_retire(struct i915_active *ref) { struct active_node *it, *n; struct rb_root root; - bool retire = false; + unsigned long flags; - lockdep_assert_held(&ref->mutex); GEM_BUG_ON(i915_active_is_idle(ref)); /* return the unused nodes to our slabcache -- flushing the allocator */ - if (atomic_dec_and_test(&ref->count)) { - debug_active_deactivate(ref); - root = ref->tree; - ref->tree = RB_ROOT; - ref->cache = NULL; - retire = true; - } - - mutex_unlock(&ref->mutex); - if (!retire) + if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)) return; GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); - rbtree_postorder_for_each_entry_safe(it, n, &root, node) { - GEM_BUG_ON(i915_active_fence_isset(&it->base)); - kmem_cache_free(global.slab_cache, it); - } + debug_active_deactivate(ref); + + root = ref->tree; + ref->tree = RB_ROOT; + ref->cache = NULL; + + spin_unlock_irqrestore(&ref->tree_lock, flags); /* After the final retire, the entire struct may be freed 
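__active_retire() now uses atomic_dec_and_lock_irqsave(), which decrements lock-free on the fast path and takes ref->tree_lock only for the thread whose decrement reaches zero, so the rbtree teardown is serialized without taxing every release. A userspace sketch of that idiom (pthread-based, illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Returns true, with *lock held, only for the final decrement. */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
	int old = atomic_load(count);

	while (old > 1)		/* fast path: provably not the last ref */
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;

	pthread_mutex_lock(lock);	/* might be the last: serialize */
	if (atomic_fetch_sub(count, 1) == 1)
		return true;		/* caller unlocks after teardown */

	pthread_mutex_unlock(lock);
	return false;
}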
*/ if (ref->retire) @@ -157,6 +152,11 @@ __active_retire(struct i915_active *ref) /* ... except if you wait on it, you must manage your own references! */ wake_up_var(ref); + + rbtree_postorder_for_each_entry_safe(it, n, &root, node) { + GEM_BUG_ON(i915_active_fence_isset(&it->base)); + kmem_cache_free(global.slab_cache, it); + } } static void @@ -168,7 +168,6 @@ active_work(struct work_struct *wrk) if (atomic_add_unless(&ref->count, -1, 1)) return; - mutex_lock(&ref->mutex); __active_retire(ref); } @@ -179,9 +178,7 @@ active_retire(struct i915_active *ref) if (atomic_add_unless(&ref->count, -1, 1)) return; - /* If we are inside interrupt context (fence signaling), defer */ - if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS || - !mutex_trylock(&ref->mutex)) { + if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) { queue_work(system_unbound_wq, &ref->work); return; } @@ -189,18 +186,33 @@ active_retire(struct i915_active *ref) __active_retire(ref); } +static inline struct dma_fence ** +__active_fence_slot(struct i915_active_fence *active) +{ + return (struct dma_fence ** __force)&active->fence; +} + +static inline bool +active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct i915_active_fence *active = + container_of(cb, typeof(*active), cb); + + return cmpxchg(__active_fence_slot(active), fence, NULL) == fence; +} + static void node_retire(struct dma_fence *fence, struct dma_fence_cb *cb) { - i915_active_fence_cb(fence, cb); - active_retire(container_of(cb, struct active_node, base.cb)->ref); + if (active_fence_cb(fence, cb)) + active_retire(container_of(cb, struct active_node, base.cb)->ref); } static void excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb) { - i915_active_fence_cb(fence, cb); - active_retire(container_of(cb, struct i915_active, excl.cb)); + if (active_fence_cb(fence, cb)) + active_retire(container_of(cb, struct i915_active, excl.cb)); } static struct i915_active_fence * @@ -226,7 +238,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) if (!prealloc) return NULL; - mutex_lock(&ref->mutex); + spin_lock_irq(&ref->tree_lock); GEM_BUG_ON(i915_active_is_idle(ref)); parent = NULL; @@ -247,7 +259,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) } node = prealloc; - __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire); + __i915_active_fence_init(&node->base, NULL, node_retire); node->ref = ref; node->timeline = idx; @@ -256,7 +268,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) out: ref->cache = node; - mutex_unlock(&ref->mutex); + spin_unlock_irq(&ref->tree_lock); BUILD_BUG_ON(offsetof(typeof(*node), base)); return &node->base; @@ -265,7 +277,8 @@ out: void __i915_active_init(struct i915_active *ref, int (*active)(struct i915_active *ref), void (*retire)(struct i915_active *ref), - struct lock_class_key *key) + struct lock_class_key *mkey, + struct lock_class_key *wkey) { unsigned long bits; @@ -277,13 +290,18 @@ void __i915_active_init(struct i915_active *ref, if (bits & I915_ACTIVE_MAY_SLEEP) ref->flags |= I915_ACTIVE_RETIRE_SLEEPS; + spin_lock_init(&ref->tree_lock); ref->tree = RB_ROOT; ref->cache = NULL; + init_llist_head(&ref->preallocated_barriers); atomic_set(&ref->count, 0); - __mutex_init(&ref->mutex, "i915_active", key); - __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire); + __mutex_init(&ref->mutex, "i915_active", mkey); + __i915_active_fence_init(&ref->excl, NULL, excl_retire); INIT_WORK(&ref->work, active_work); +#if IS_ENABLED(CONFIG_LOCKDEP) 
+ lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0); +#endif } static bool ____active_del_barrier(struct i915_active *ref, @@ -377,15 +395,8 @@ void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) /* We expect the caller to manage the exclusive timeline ordering */ GEM_BUG_ON(i915_active_is_idle(ref)); - /* - * As we don't know which mutex the caller is using, we told a small - * lie to the debug code that it is using the i915_active.mutex; - * and now we must stick to that lie. - */ - mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_); if (!__i915_active_fence_set(&ref->excl, f)) atomic_inc(&ref->count); - mutex_release(&ref->mutex.dep_map, 0, _THIS_IP_); } bool i915_active_acquire_if_busy(struct i915_active *ref) @@ -408,8 +419,10 @@ int i915_active_acquire(struct i915_active *ref) if (!atomic_read(&ref->count) && ref->active) err = ref->active(ref); if (!err) { + spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */ debug_active_activate(ref); atomic_inc(&ref->count); + spin_unlock_irq(&ref->tree_lock); } mutex_unlock(&ref->mutex); @@ -462,6 +475,7 @@ int i915_active_wait(struct i915_active *ref) if (wait_var_event_interruptible(ref, i915_active_is_idle(ref))) return -EINTR; + flush_work(&ref->work); return 0; } @@ -509,7 +523,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) if (RB_EMPTY_ROOT(&ref->tree)) return NULL; - mutex_lock(&ref->mutex); + spin_lock_irq(&ref->tree_lock); GEM_BUG_ON(i915_active_is_idle(ref)); /* @@ -574,7 +588,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) goto match; } - mutex_unlock(&ref->mutex); + spin_unlock_irq(&ref->tree_lock); return NULL; @@ -582,7 +596,7 @@ match: rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ if (p == &ref->cache->node) ref->cache = NULL; - mutex_unlock(&ref->mutex); + spin_unlock_irq(&ref->tree_lock); return rb_entry(p, struct active_node, node); } @@ -595,6 +609,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct llist_node *pos, *next; int err; + GEM_BUG_ON(i915_active_is_idle(ref)); GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); /* @@ -615,10 +630,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, goto unwind; } -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) - node->base.lock = - &engine->kernel_context->timeline->mutex; -#endif RCU_INIT_POINTER(node->base.fence, NULL); node->base.cb.func = node_retire; node->timeline = idx; @@ -639,6 +650,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, node->base.cb.node.prev = (void *)engine; atomic_inc(&ref->count); } + GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN)); GEM_BUG_ON(barrier_to_engine(node) != engine); llist_add(barrier_to_ll(node), &ref->preallocated_barriers); @@ -662,6 +674,7 @@ unwind: void i915_active_acquire_barrier(struct i915_active *ref) { struct llist_node *pos, *next; + unsigned long flags; GEM_BUG_ON(i915_active_is_idle(ref)); @@ -671,12 +684,13 @@ void i915_active_acquire_barrier(struct i915_active *ref) * populated by i915_request_add_active_barriers() to point to the * request that will eventually release them. 
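The preallocated barriers referenced here travel on a lock-free llist: producers push nodes one at a time, and the consumer detaches the whole chain in a single atomic swap (the pattern behind the kernel's llist_add()/llist_del_all(), which take_preallocated_barriers() builds on). A minimal user-space sketch of that push/take-all idiom, with C11 atomics standing in for the kernel llist API and an illustrative payload:

    #include <stdatomic.h>
    #include <stdio.h>

    struct lnode {
            struct lnode *next;
            int payload;
    };

    /* Lock-free push: CAS the new node in as the list head. */
    static void ll_add(struct lnode *n, _Atomic(struct lnode *) *head)
    {
            struct lnode *first = atomic_load(head);

            do {
                    n->next = first;
            } while (!atomic_compare_exchange_weak(head, &first, n));
    }

    /* Detach every node at once; the chain is then private to the caller. */
    static struct lnode *ll_take_all(_Atomic(struct lnode *) *head)
    {
            return atomic_exchange(head, NULL);
    }

    int main(void)
    {
            static _Atomic(struct lnode *) head;
            struct lnode nodes[3] = {
                    { .payload = 0 }, { .payload = 1 }, { .payload = 2 },
            };

            for (int i = 0; i < 3; i++)
                    ll_add(&nodes[i], &head);

            for (struct lnode *n = ll_take_all(&head); n; n = n->next)
                    printf("%d\n", n->payload); /* 2 1 0: LIFO order */

            return 0;
    }

Because the consumer owns the detached chain outright, it can walk and re-link the nodes (as the rb-tree insertion below does) without further synchronisation against the producers.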
*/ - mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { struct active_node *node = barrier_from_ll(pos); struct intel_engine_cs *engine = barrier_to_engine(node); struct rb_node **p, *parent; + spin_lock_irqsave_nested(&ref->tree_lock, flags, + SINGLE_DEPTH_NESTING); parent = NULL; p = &ref->tree.rb_node; while (*p) { @@ -692,12 +706,17 @@ void i915_active_acquire_barrier(struct i915_active *ref) } rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &ref->tree); + spin_unlock_irqrestore(&ref->tree_lock, flags); GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); llist_add(barrier_to_ll(node), &engine->barrier_tasks); intel_engine_pm_put(engine); } - mutex_unlock(&ref->mutex); +} + +static struct dma_fence **ll_to_fence_slot(struct llist_node *node) +{ + return __active_fence_slot(&barrier_from_ll(node)->base); } void i915_request_add_active_barriers(struct i915_request *rq) @@ -706,6 +725,7 @@ void i915_request_add_active_barriers(struct i915_request *rq) struct llist_node *node, *next; unsigned long flags; + GEM_BUG_ON(!intel_context_is_barrier(rq->context)); GEM_BUG_ON(intel_engine_is_virtual(engine)); GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline); @@ -719,19 +739,13 @@ void i915_request_add_active_barriers(struct i915_request *rq) */ spin_lock_irqsave(&rq->lock, flags); llist_for_each_safe(node, next, node) { - RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence); - smp_wmb(); /* serialise with reuse_idle_barrier */ + /* serialise with reuse_idle_barrier */ + smp_store_mb(*ll_to_fence_slot(node), &rq->fence); list_add_tail((struct list_head *)node, &rq->fence.cb_list); } spin_unlock_irqrestore(&rq->lock, flags); } -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) -#define active_is_held(active) lockdep_is_held((active)->lock) -#else -#define active_is_held(active) true -#endif - /* * __i915_active_fence_set: Update the last active fence along its timeline * @active: the active tracker @@ -742,7 +756,7 @@ void i915_request_add_active_barriers(struct i915_request *rq) * fence onto this one. Returns the previous fence (if not already completed), * which the caller must ensure is executed before the new fence. To ensure * that the order of fences within the timeline of the i915_active_fence is - * maintained, it must be locked by the caller. + * understood, it should be locked by the caller. */ struct dma_fence * __i915_active_fence_set(struct i915_active_fence *active, @@ -751,34 +765,41 @@ __i915_active_fence_set(struct i915_active_fence *active, struct dma_fence *prev; unsigned long flags; - /* NB: must be serialised by an outer timeline mutex (active->lock) */ - spin_lock_irqsave(fence->lock, flags); + if (fence == rcu_access_pointer(active->fence)) + return fence; + GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); - prev = rcu_dereference_protected(active->fence, active_is_held(active)); + /* + * Consider that we have two threads arriving (A and B), with + * C already resident as the active->fence. + * + * A does the xchg first, and so it sees C or NULL depending + * on the timing of the interrupt handler. If it is NULL, the + * previous fence must have been signaled and we know that + * we are first on the timeline. If it is still present, + * we acquire the lock on that fence and serialise with the interrupt + * handler, in the process removing it from any future interrupt + * callback. A will then wait on C before executing (if present). 
+ * + * As B is second, it sees A as the previous fence and so waits for + * it to complete its transition and takes over the occupancy for + * itself -- remembering that it needs to wait on A before executing. + * + * Note the strong ordering of the timeline also provides consistent + * nesting rules for the fence->lock; the inner lock is always the + * older lock. + */ + spin_lock_irqsave(fence->lock, flags); + prev = xchg(__active_fence_slot(active), fence); if (prev) { GEM_BUG_ON(prev == fence); spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); __list_del_entry(&active->cb.node); spin_unlock(prev->lock); /* serialise with prev->cb_list */ - - /* - * active->fence is reset by the callback from inside - * interrupt context. We need to serialise our list - * manipulation with the fence->lock to prevent the prev - * being lost inside an interrupt (it can't be replaced as - * no other caller is allowed to enter __i915_active_fence_set - * as we hold the timeline lock). After serialising with - * the callback, we need to double check which ran first, - * our list_del() [decoupling prev from the callback] or - * the callback... - */ - prev = rcu_access_pointer(active->fence); } - - rcu_assign_pointer(active->fence, fence); + GEM_BUG_ON(rcu_access_pointer(active->fence) != fence); list_add_tail(&active->cb.node, &fence->cb_list); - spin_unlock_irqrestore(fence->lock, flags); return prev; @@ -790,10 +811,6 @@ int i915_active_fence_set(struct i915_active_fence *active, struct dma_fence *fence; int err = 0; -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) - lockdep_assert_held(active->lock); -#endif - /* Must maintain timeline ordering wrt previous active requests */ rcu_read_lock(); fence = __i915_active_fence_set(active, &rq->fence); @@ -810,7 +827,7 @@ int i915_active_fence_set(struct i915_active_fence *active, void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb) { - i915_active_fence_cb(fence, cb); + active_fence_cb(fence, cb); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 4f52fe6146d2..b571f675c795 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -61,19 +61,15 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb); */ static inline void __i915_active_fence_init(struct i915_active_fence *active, - struct mutex *lock, void *fence, dma_fence_func_t fn) { RCU_INIT_POINTER(active->fence, fence); active->cb.func = fn ?: i915_active_noop; -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) - active->lock = lock; -#endif } -#define INIT_ACTIVE_FENCE(A, LOCK) \ - __i915_active_fence_init((A), (LOCK), NULL, NULL) +#define INIT_ACTIVE_FENCE(A) \ + __i915_active_fence_init((A), NULL, NULL) struct dma_fence * __i915_active_fence_set(struct i915_active_fence *active, @@ -127,15 +123,6 @@ i915_active_fence_isset(const struct i915_active_fence *active) return rcu_access_pointer(active->fence); } -static inline void -i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) -{ - struct i915_active_fence *active = - container_of(cb, typeof(*active), cb); - - RCU_INIT_POINTER(active->fence, NULL); -} - /* * GPU activity tracking * @@ -165,11 +152,15 @@ i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) void __i915_active_init(struct i915_active *ref, int (*active)(struct i915_active *ref), void (*retire)(struct i915_active *ref), - struct lock_class_key *key); + struct lock_class_key *mkey, + struct lock_class_key *wkey); + 
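The two-thread handover the comment above describes reduces to two atomic operations: a setter publishes itself with an exchange and learns who occupied the slot before it, and the retire callback clears the slot only if it still owns it. A user-space model of that protocol, with C11 atomics standing in for xchg()/cmpxchg(); the fence struct and names are illustrative, and the per-fence locking that orders the callback lists is omitted:

    #include <stdatomic.h>
    #include <stdio.h>

    struct fence { int id; };

    static _Atomic(struct fence *) slot;

    /*
     * Setter: publish the new fence and learn the previous occupant,
     * which the caller must order itself after. Mirrors the xchg()
     * in __i915_active_fence_set().
     */
    static struct fence *slot_set(struct fence *f)
    {
            return atomic_exchange(&slot, f);
    }

    /*
     * Completion callback: retire only if the slot still points at us;
     * if a later fence already took over, do nothing. Mirrors
     * active_fence_cb().
     */
    static int slot_retire(struct fence *f)
    {
            struct fence *expected = f;

            return atomic_compare_exchange_strong(&slot, &expected, NULL);
    }

    int main(void)
    {
            struct fence a = { .id = 1 }, b = { .id = 2 };

            slot_set(&a);                      /* A occupies the slot */
            struct fence *prev = slot_set(&b); /* B takes over, sees A */
            printf("B must wait on fence %d\n", prev->id);

            printf("A retires slot? %d\n", slot_retire(&a)); /* 0: B owns it */
            printf("B retires slot? %d\n", slot_retire(&b)); /* 1 */
            return 0;
    }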
+/* Specialise each class of i915_active to avoid impossible lockdep cycles. */ #define i915_active_init(ref, active, retire) do { \ - static struct lock_class_key __key; \ + static struct lock_class_key __mkey; \ + static struct lock_class_key __wkey; \ \ - __i915_active_init(ref, active, retire, &__key); \ + __i915_active_init(ref, active, retire, &__mkey, &__wkey); \ } while (0) int i915_active_ref(struct i915_active *ref, @@ -214,4 +205,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, void i915_active_acquire_barrier(struct i915_active *ref); void i915_request_add_active_barriers(struct i915_request *rq); +void i915_active_print(struct i915_active *ref, struct drm_printer *m); +void i915_active_unlock_wait(struct i915_active *ref); + #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index d89a74c142c6..6360c3e4b765 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -20,21 +20,6 @@ struct i915_active_fence { struct dma_fence __rcu *fence; struct dma_fence_cb cb; -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) - /* - * Incorporeal! - * - * Updates to the i915_active_request must be serialised under a lock - * to ensure that the timeline is ordered. Normally, this is the - * timeline->mutex, but another mutex may be used so long as it is - * done so consistently. - * - * For lockdep tracking of the above, we store the lock we intend - * to always use for updates of this i915_active_request during - * construction and assert that is held on every update. - */ - struct mutex *lock; -#endif }; struct active_node; @@ -48,6 +33,7 @@ struct i915_active { atomic_t count; struct mutex mutex; + spinlock_t tree_lock; struct active_node *cache; struct rb_root tree; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 24555102e198..a0e437aa65b7 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -53,13 +53,11 @@ * granting userspace undue privileges. There are three categories of privilege. * * First, commands which are explicitly defined as privileged or which should - * only be used by the kernel driver. The parser generally rejects such - * commands, though it may allow some from the drm master process. + * only be used by the kernel driver. The parser rejects such commands * * Second, commands which access registers. To support correct/enhanced * userspace functionality, particularly certain OpenGL extensions, the parser - * provides a whitelist of registers which userspace may safely access (for both - * normal and drm master processes). + * provides a whitelist of registers which userspace may safely access * * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc). * The parser always rejects such commands. @@ -84,9 +82,9 @@ * in the per-engine command tables. * * Other command table entries map fairly directly to high level categories - * mentioned above: rejected, master-only, register whitelist. The parser - * implements a number of checks, including the privileged memory checks, via a - * general bitmasking mechanism. + * mentioned above: rejected, register whitelist. The parser implements a number + * of checks, including the privileged memory checks, via a general bitmasking + * mechanism. 
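That "general bitmasking mechanism" amounts to descriptor-supplied {offset, mask, expected} triples: the parser masks one dword of the command and rejects the command when the masked bits differ from the expected value (or when the command is too short to check at all). A self-contained sketch of the check; the rule contents and bit positions here are made up for illustration, not taken from the driver's tables:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bitmask_rule {
            uint32_t offset;   /* dword index into the command */
            uint32_t mask;     /* bits the parser cares about */
            uint32_t expected; /* required value of those bits */
    };

    static bool check_bits(const uint32_t *cmd, uint32_t len,
                           const struct bitmask_rule *rule)
    {
            if (rule->offset >= len)
                    return false; /* too short to check: reject */

            return (cmd[rule->offset] & rule->mask) == rule->expected;
    }

    int main(void)
    {
            /* e.g. insist that a hypothetical "global GTT" bit 22 is clear */
            const struct bitmask_rule no_ggtt = {
                    .offset = 0, .mask = 1u << 22, .expected = 0,
            };
            uint32_t ok_cmd[2]  = { 0x18800000, 0 };
            uint32_t bad_cmd[2] = { 0x18C00000, 0 }; /* bit 22 set */

            printf("%d %d\n", check_bits(ok_cmd, 2, &no_ggtt),
                   check_bits(bad_cmd, 2, &no_ggtt));
            return 0;
    }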
*/ /* @@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor { * CMD_DESC_REJECT: The command is never allowed * CMD_DESC_REGISTER: The command should be checked against the * register whitelist for the appropriate ring - * CMD_DESC_MASTER: The command is allowed if the submitting process - * is the DRM master */ u32 flags; #define CMD_DESC_FIXED (1<<0) @@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor { #define CMD_DESC_REJECT (1<<2) #define CMD_DESC_REGISTER (1<<3) #define CMD_DESC_BITMASK (1<<4) -#define CMD_DESC_MASTER (1<<5) /* * The command's unique identification bits and the bitmask to get them. @@ -194,7 +189,7 @@ struct drm_i915_cmd_table { #define CMD(op, opm, f, lm, fl, ...) \ { \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ - .cmd = { (op), ~0u << (opm) }, \ + .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } @@ -209,14 +204,13 @@ struct drm_i915_cmd_table { #define R CMD_DESC_REJECT #define W CMD_DESC_REGISTER #define B CMD_DESC_BITMASK -#define M CMD_DESC_MASTER /* Command Mask Fixed Len Action ---------------------------------------------------------- */ -static const struct drm_i915_cmd_descriptor common_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = { CMD( MI_NOOP, SMI, F, 1, S ), CMD( MI_USER_INTERRUPT, SMI, F, 1, R ), - CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ), CMD( MI_ARB_CHECK, SMI, F, 1, S ), CMD( MI_REPORT_HEAD, SMI, F, 1, S ), CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), @@ -241,12 +235,12 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = { /* * MI_BATCH_BUFFER_START requires some special handling. It's not * really a 'skip' action but it doesn't seem like it's worth adding - * a new action. See i915_parse_cmds(). + * a new action. See intel_engine_cmd_parser(). 
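Worth noting in the hunk above: the CMD() macro now stores the opcode pre-masked (op & ~0u << opm), so matching a header is a single masked compare against the stored value, and variable-length commands encode (length - bias) in their low bits. A compact sketch of both operations, with illustrative bit positions rather than the exact MI encoding:

    #include <stdint.h>
    #include <stdio.h>

    struct cmd_desc {
            uint32_t value;     /* opcode bits, pre-masked */
            uint32_t mask;      /* which header bits identify the opcode */
            uint32_t len_mask;  /* 0 for fixed-length commands */
            uint32_t len_fixed;
    };

    #define LENGTH_BIAS 2

    /* Match: compare only the opcode bits of the header. */
    static int cmd_matches(uint32_t header, const struct cmd_desc *d)
    {
            return (header & d->mask) == d->value;
    }

    /* Variable-length commands carry (length - LENGTH_BIAS) in low bits. */
    static uint32_t cmd_length(uint32_t header, const struct cmd_desc *d)
    {
            if (d->len_mask == 0)
                    return d->len_fixed;
            return (header & d->len_mask) + LENGTH_BIAS;
    }

    int main(void)
    {
            /* illustrative command: opcode in the high bits, 6-bit length */
            const struct cmd_desc d = {
                    .value    = (0x18u << 23) & (~0u << 23), /* pre-masked */
                    .mask     = ~0u << 23,
                    .len_mask = 0x3F,
            };
            uint32_t header = (0x18u << 23) | 3; /* opcode + encoded length */

            printf("match=%d length=%u dwords\n",
                   cmd_matches(header, &d), cmd_length(header, &d));
            return 0;
    }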
*/ CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), }; -static const struct drm_i915_cmd_descriptor render_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = { CMD( MI_FLUSH, SMI, F, 1, S ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_PREDICATE, SMI, F, 1, S ), @@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ), - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), @@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ), }; -static const struct drm_i915_cmd_descriptor video_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = { CMD( MFX_WAIT, SMFX, F, 1, S ), }; -static const struct drm_i915_cmd_descriptor vecs_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = { }}, ), }; -static const struct drm_i915_cmd_descriptor blt_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = { CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B, .bits = {{ @@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = { }; static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +/* + * For Gen9 we can still rely on the h/w to enforce cmd security, and only + * need to re-enforce the register access checks. We therefore only need to + * teach the cmdparser how to find the end of each command, and identify + * register accesses. The table doesn't need to reject any commands, and so + * the only commands listed here are: + * 1) Those that touch registers + * 2) Those that do not have the default 8-bit length + * + * Note that the default MI length mask chosen for this table is 0xFF, not + * the 0x3F used on older devices. This is because the vast majority of MI + * cmds on Gen9 use a standard 8-bit Length field. + * All the Gen9 blitter instructions are standard 0xFF length mask, and + * none allow access to non-general registers, so in fact no BLT cmds are + * included in the table at all. 
+ * + */ +static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = { + CMD( MI_NOOP, SMI, F, 1, S ), + CMD( MI_USER_INTERRUPT, SMI, F, 1, S ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ), + CMD( MI_FLUSH, SMI, F, 1, S ), + CMD( MI_ARB_CHECK, SMI, F, 1, S ), + CMD( MI_REPORT_HEAD, SMI, F, 1, S ), + CMD( MI_ARB_ON_OFF, SMI, F, 1, S ), + CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ), + CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ), + CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ), + CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ), + CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), + + /* + * We allow BB_START but apply further checks. We just sanitize the + * basic fields here. + */ +#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0) +#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1) + CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B, + .bits = {{ + .offset = 0, + .mask = MI_BB_START_OPERAND_MASK, + .expected = MI_BB_START_OPERAND_EXPECT, + }}, ), +}; + static const struct drm_i915_cmd_descriptor noop_desc = CMD(MI_NOOP, SMI, F, 1, S); @@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc = #undef R #undef W #undef B -#undef M -static const struct drm_i915_cmd_table gen7_render_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table gen7_render_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, }; -static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) }, }; -static const struct drm_i915_cmd_table gen7_video_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { video_cmds, ARRAY_SIZE(video_cmds) }, +static const struct drm_i915_cmd_table gen7_video_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) }, }; -static const struct drm_i915_cmd_table hsw_vebox_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { vecs_cmds, ARRAY_SIZE(vecs_cmds) }, +static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) }, }; -static const struct drm_i915_cmd_table gen7_blt_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, }; -static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + 
{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) }, }; +static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = { + { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) }, +}; + + /* * Register whitelists, sorted by increasing register offset. */ @@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; -static const struct drm_i915_reg_descriptor ivb_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)), -}; - -static const struct drm_i915_reg_descriptor hsw_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), +static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), + REG64_IDX(BCS_GPR, 3), + REG64_IDX(BCS_GPR, 4), + REG64_IDX(BCS_GPR, 5), + REG64_IDX(BCS_GPR, 6), + REG64_IDX(BCS_GPR, 7), + REG64_IDX(BCS_GPR, 8), + REG64_IDX(BCS_GPR, 9), + REG64_IDX(BCS_GPR, 10), + REG64_IDX(BCS_GPR, 11), + REG64_IDX(BCS_GPR, 12), + REG64_IDX(BCS_GPR, 13), + REG64_IDX(BCS_GPR, 14), + REG64_IDX(BCS_GPR, 15), }; #undef REG64 @@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = { struct drm_i915_reg_table { const struct drm_i915_reg_descriptor *regs; int num_regs; - bool master; }; static const struct drm_i915_reg_table ivb_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, }; static const struct drm_i915_reg_table ivb_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, }; static const struct drm_i915_reg_table hsw_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) }, }; static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, +}; + +static const struct drm_i915_reg_table gen9_blt_reg_tables[] = { + { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) }, }; static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) @@ -670,7 +731,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) return 0xFF; } - DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header); + DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header); return 0; } @@ -693,7 +754,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header) return 0xFF; } - DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header); + DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header); return 0; } @@ -706,7 +767,18 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) else if (client == INSTR_BC_CLIENT) return 0xFF; - DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header); + DRM_DEBUG("CMD: Abnormal blt cmd length! 
0x%08X\n", cmd_header); + return 0; +} + +static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header) +{ + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; + + if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT) + return 0xFF; + + DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header); return 0; } @@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN(engine->i915, 7)) + if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) && + engine->class == COPY_ENGINE_CLASS)) return; switch (engine->class) { case RENDER_CLASS: if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_render_ring_cmds; + cmd_tables = hsw_render_ring_cmd_table; cmd_table_count = - ARRAY_SIZE(hsw_render_ring_cmds); + ARRAY_SIZE(hsw_render_ring_cmd_table); } else { - cmd_tables = gen7_render_cmds; - cmd_table_count = ARRAY_SIZE(gen7_render_cmds); + cmd_tables = gen7_render_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table); } if (IS_HASWELL(engine->i915)) { @@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->reg_tables = ivb_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables); } - engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; case VIDEO_DECODE_CLASS: - cmd_tables = gen7_video_cmds; - cmd_table_count = ARRAY_SIZE(gen7_video_cmds); + cmd_tables = gen7_video_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case COPY_ENGINE_CLASS: - if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_blt_ring_cmds; - cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; + if (IS_GEN(engine->i915, 9)) { + cmd_tables = gen9_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table); + engine->get_cmd_length_mask = + gen9_blt_get_cmd_length_mask; + + /* BCS Engine unsafe without parser */ + engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER; + } else if (IS_HASWELL(engine->i915)) { + cmd_tables = hsw_blt_ring_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table); } else { - cmd_tables = gen7_blt_cmds; - cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); + cmd_tables = gen7_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table); } - if (IS_HASWELL(engine->i915)) { + if (IS_GEN(engine->i915, 9)) { + engine->reg_tables = gen9_blt_reg_tables; + engine->reg_table_count = + ARRAY_SIZE(gen9_blt_reg_tables); + } else if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { engine->reg_tables = ivb_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables); } - - engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; case VIDEO_ENHANCEMENT_CLASS: - cmd_tables = hsw_vebox_cmds; - cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); + cmd_tables = hsw_vebox_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table); /* VECS can use the same length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; @@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) return; } - engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER; + engine->flags |= I915_ENGINE_USING_CMD_PARSER; } /** @@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) */ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine) { - if 
(!intel_engine_needs_cmd_parser(engine)) + if (!intel_engine_using_cmd_parser(engine)) return; fini_hash_table(engine); @@ -1029,119 +1112,98 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) } static const struct drm_i915_reg_descriptor * -find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, u32 addr) { const struct drm_i915_reg_table *table = engine->reg_tables; + const struct drm_i915_reg_descriptor *reg = NULL; int count = engine->reg_table_count; - for (; count > 0; ++table, --count) { - if (!table->master || is_master) { - const struct drm_i915_reg_descriptor *reg; + for (; !reg && (count > 0); ++table, --count) + reg = __find_reg(table->regs, table->num_regs, addr); - reg = __find_reg(table->regs, table->num_regs, addr); - if (reg != NULL) - return reg; - } - } - - return NULL; + return reg; } /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, - u32 batch_start_offset, - u32 batch_len, - bool *needs_clflush_after) + u32 offset, u32 length) { - unsigned int src_needs_clflush; - unsigned int dst_needs_clflush; + bool needs_clflush; void *dst, *src; int ret; - ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush); - if (ret) - return ERR_PTR(ret); - dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB); - i915_gem_object_finish_access(dst_obj); if (IS_ERR(dst)) return dst; - ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); + ret = i915_gem_object_pin_pages(src_obj); if (ret) { i915_gem_object_unpin_map(dst_obj); return ERR_PTR(ret); } + needs_clflush = + !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ); + src = ERR_PTR(-ENODEV); - if (src_needs_clflush && - i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) { + if (needs_clflush && i915_has_memcpy_from_wc()) { src = i915_gem_object_pin_map(src_obj, I915_MAP_WC); if (!IS_ERR(src)) { - i915_memcpy_from_wc(dst, - src + batch_start_offset, - ALIGN(batch_len, 16)); + i915_unaligned_memcpy_from_wc(dst, + src + offset, + length); i915_gem_object_unpin_map(src_obj); } } if (IS_ERR(src)) { void *ptr; - int offset, n; - - offset = offset_in_page(batch_start_offset); + int x, n; - /* We can avoid clflushing partial cachelines before the write + /* + * We can avoid clflushing partial cachelines before the write * if we only every write full cache-lines. Since we know that * both the source and destination are in multiples of * PAGE_SIZE, we can simply round up to the next cacheline. * We don't care about copying too much here as we only * validate up to the end of the batch. 
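The fallback copy in this hunk walks the source object page by page: the first iteration copies from offset_in_page(offset) to the end of that page, and every later iteration starts at offset zero. A user-space analogue of that chunking loop; PAGE_SIZE and the page accessor are stand-ins for the pinned-page API (i915_gem_object_get_page() plus kmap_atomic()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Stand-in for looking up and mapping page n of the source object. */
    static const uint8_t *get_page(const uint8_t *obj, unsigned int n)
    {
            return obj + (size_t)n * PAGE_SIZE;
    }

    static void copy_range(uint8_t *dst, const uint8_t *src_obj,
                           uint32_t offset, uint32_t length)
    {
            uint32_t x = offset % PAGE_SIZE;     /* offset_in_page() */
            unsigned int n = offset / PAGE_SIZE; /* first page index */

            while (length) {
                    uint32_t len = length < PAGE_SIZE - x ?
                                   length : PAGE_SIZE - x;

                    memcpy(dst, get_page(src_obj, n) + x, len);
                    dst += len;
                    length -= len;
                    x = 0; /* later pages are copied from their start */
                    n++;
            }
    }

    int main(void)
    {
            static uint8_t src[3 * PAGE_SIZE], dst[2 * PAGE_SIZE];

            for (size_t i = 0; i < sizeof(src); i++)
                    src[i] = (uint8_t)i;

            /* copy 5000 bytes starting 100 bytes into page 1 */
            copy_range(dst, src, PAGE_SIZE + 100, 5000);
            printf("%d\n", memcmp(dst, src + PAGE_SIZE + 100, 5000)); /* 0 */
            return 0;
    }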
*/ - if (dst_needs_clflush & CLFLUSH_BEFORE) - batch_len = roundup(batch_len, - boot_cpu_data.x86_clflush_size); + if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + length = round_up(length, + boot_cpu_data.x86_clflush_size); ptr = dst; - for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) { - int len = min_t(int, batch_len, PAGE_SIZE - offset); + x = offset_in_page(offset); + for (n = offset >> PAGE_SHIFT; length; n++) { + int len = min_t(int, length, PAGE_SIZE - x); src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); - if (src_needs_clflush) - drm_clflush_virt_range(src + offset, len); - memcpy(ptr, src + offset, len); + if (needs_clflush) + drm_clflush_virt_range(src + x, len); + memcpy(ptr, src + x, len); kunmap_atomic(src); ptr += len; - batch_len -= len; - offset = 0; + length -= len; + x = 0; } } - i915_gem_object_finish_access(src_obj); + i915_gem_object_unpin_pages(src_obj); /* dst_obj is returned with vmap pinned */ - *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; - return dst; } static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, - const u32 *cmd, u32 length, - const bool is_master) + const u32 *cmd, u32 length) { if (desc->flags & CMD_DESC_SKIP) return true; if (desc->flags & CMD_DESC_REJECT) { - DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd); - return false; - } - - if ((desc->flags & CMD_DESC_MASTER) && !is_master) { - DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n", - *cmd); + DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd); return false; } @@ -1158,11 +1220,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg(engine, is_master, reg_addr); + find_reg(engine, reg_addr); if (!reg) { - DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n", - reg_addr, *cmd, engine->name); + DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n", + reg_addr, *cmd, engine->name); return false; } @@ -1172,22 +1234,22 @@ static bool check_cmd(const struct intel_engine_cs *engine, */ if (reg->mask) { if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { - DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n", - reg_addr); + DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n", + reg_addr); return false; } if (desc->cmd.value == MI_LOAD_REGISTER_REG) { - DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n", - reg_addr); + DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n", + reg_addr); return false; } if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { - DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n", - reg_addr); + DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n", + reg_addr); return false; } } @@ -1214,8 +1276,8 @@ static bool check_cmd(const struct intel_engine_cs *engine, } if (desc->bits[i].offset >= length) { - DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n", - *cmd, engine->name); + DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n", + *cmd, engine->name); return false; } @@ -1223,11 +1285,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, desc->bits[i].mask; if (dword != desc->bits[i].expected) { - DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n", - *cmd, - 
desc->bits[i].mask, - desc->bits[i].expected, - dword, engine->name); + DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n", + *cmd, + desc->bits[i].mask, + desc->bits[i].expected, + dword, engine->name); return false; } } @@ -1236,16 +1298,98 @@ static bool check_cmd(const struct intel_engine_cs *engine, return true; } +static int check_bbstart(u32 *cmd, u32 offset, u32 length, + u32 batch_length, + u64 batch_addr, + u64 shadow_addr, + const unsigned long *jump_whitelist) +{ + u64 jump_offset, jump_target; + u32 target_cmd_offset, target_cmd_index; + + /* For igt compatibility on older platforms */ + if (!jump_whitelist) { + DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n"); + return -EACCES; + } + + if (length != 3) { + DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n", + length); + return -EINVAL; + } + + jump_target = *(u64 *)(cmd + 1); + jump_offset = jump_target - batch_addr; + + /* + * Any underflow of jump_target is guaranteed to be outside the range + * of a u32, so >= test catches both too large and too small + */ + if (jump_offset >= batch_length) { + DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n", + jump_target); + return -EINVAL; + } + + /* + * This cannot overflow a u32 because we already checked jump_offset + * is within the BB, and the batch_length is a u32 + */ + target_cmd_offset = lower_32_bits(jump_offset); + target_cmd_index = target_cmd_offset / sizeof(u32); + + *(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset; + + if (target_cmd_index == offset) + return 0; + + if (IS_ERR(jump_whitelist)) + return PTR_ERR(jump_whitelist); + + if (!test_bit(target_cmd_index, jump_whitelist)) { + DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n", + jump_target); + return -EINVAL; + } + + return 0; +} + +static unsigned long *alloc_whitelist(u32 batch_length) +{ + unsigned long *jmp; + + /* + * We expect batch_length to be less than 256KiB for known users, + * i.e. we need at most an 8KiB bitmap allocation which should be + * reasonably cheap due to kmalloc caches. + */ + + /* Prefer to report transient allocation failure rather than hit oom */ + jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)), + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (!jmp) + return ERR_PTR(-ENOMEM); + + return jmp; +} + #define LENGTH_BIAS 2 +static bool shadow_needs_clflush(struct drm_i915_gem_object *obj) +{ + return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); +} + /** - * i915_parse_cmds() - parse a submitted batch buffer for privilege violations + * intel_engine_cmd_parser() - parse a batch buffer for privilege violations * @engine: the engine on which the batch is to execute - * @batch_obj: the batch buffer in question - * @shadow_batch_obj: copy of the batch buffer in question - * @batch_start_offset: byte offset in the batch at which execution starts - * @batch_len: length of the commands in batch_obj - * @is_master: is the submitting process the drm master? + * @batch: the batch buffer in question + * @batch_offset: byte offset in the batch at which execution starts + * @batch_length: length of the commands in batch_obj + * @shadow: validated copy of the batch buffer in question + * @trampoline: whether to emit a conditional trampoline at the end of the batch * * Parses the specified batch buffer looking for privilege violations as * described in the overview. 
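check_bbstart() above reduces batch self-jumps to two tests: the target offset must fall inside the batch (a single unsigned compare catches both underflow and overflow) and must index a dword the parser has already validated, recorded in the jump whitelist bitmap. A compact user-space sketch of those two tests, using a fixed-size bitmap in place of bitmap_zalloc():

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH_DWORDS 64

    static uint64_t jump_whitelist[BATCH_DWORDS / 64 + 1];

    static void mark_scanned(uint32_t dword_index)
    {
            jump_whitelist[dword_index / 64] |= 1ull << (dword_index % 64);
    }

    static int check_jump(uint64_t target, uint64_t batch_addr,
                          uint32_t batch_len)
    {
            uint64_t off = target - batch_addr;

            /* Underflow wraps to a huge value, so one test catches both ends. */
            if (off >= batch_len)
                    return -1; /* jump leaves the batch */

            uint32_t idx = (uint32_t)(off / 4);
            if (!(jump_whitelist[idx / 64] & (1ull << (idx % 64))))
                    return -1; /* target was never scanned */

            return 0;
    }

    int main(void)
    {
            uint64_t batch_addr = 0x100000;

            mark_scanned(0);
            mark_scanned(4);

            printf("%d\n", check_jump(batch_addr + 16, batch_addr, 256)); /*  0 */
            printf("%d\n", check_jump(batch_addr + 20, batch_addr, 256)); /* -1: unscanned */
            printf("%d\n", check_jump(batch_addr - 8,  batch_addr, 256)); /* -1: underflow */
            return 0;
    }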
@@ -1254,90 +1398,146 @@ static bool check_cmd(const struct intel_engine_cs *engine, * if the batch appears legal but should use hardware parsing */ int intel_engine_cmd_parser(struct intel_engine_cs *engine, - struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, - u32 batch_start_offset, - u32 batch_len, - bool is_master) + struct i915_vma *batch, + u32 batch_offset, + u32 batch_length, + struct i915_vma *shadow, + bool trampoline) { - u32 *cmd, *batch_end; + u32 *cmd, *batch_end, offset = 0; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; - bool needs_clflush_after = false; + unsigned long *jump_whitelist; + u64 batch_addr, shadow_addr; int ret = 0; - cmd = copy_batch(shadow_batch_obj, batch_obj, - batch_start_offset, batch_len, - &needs_clflush_after); + GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd))); + GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd))); + GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length, + batch->size)); + GEM_BUG_ON(!batch_length); + + cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length); if (IS_ERR(cmd)) { - DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n"); + DRM_DEBUG("CMD: Failed to copy batch\n"); return PTR_ERR(cmd); } + jump_whitelist = NULL; + if (!trampoline) + /* Defer failure until attempted use */ + jump_whitelist = alloc_whitelist(batch_length); + + shadow_addr = gen8_canonical_addr(shadow->node.start); + batch_addr = gen8_canonical_addr(batch->node.start + batch_offset); + /* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra * space. Parsing should be faster in some cases this way. */ - batch_end = cmd + (batch_len / sizeof(*batch_end)); + batch_end = cmd + batch_length / sizeof(*batch_end); do { u32 length; - if (*cmd == MI_BATCH_BUFFER_END) { - if (needs_clflush_after) { - void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); - drm_clflush_virt_range(ptr, - (void *)(cmd + 1) - ptr); - } + if (*cmd == MI_BATCH_BUFFER_END) break; - } desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) { - DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", - *cmd); + DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd); ret = -EINVAL; break; } - /* - * If the batch buffer contains a chained batch, return an - * error that tells the caller to abort and dispatch the - * workload as a non-secure batch. 
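The scanning loop in this function is a straightforward cursor advance: look up the descriptor for the dword under the cursor, derive the command's length, bounds-check it against the end of the batch, validate it, then step forward by that many dwords until MI_BATCH_BUFFER_END. Stripped to its skeleton, with the descriptor lookup and the per-command checks stubbed out as assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define LENGTH_BIAS 2

    /* Illustrative decode: low 6 bits encode (length - LENGTH_BIAS). */
    static uint32_t decode_length(uint32_t header)
    {
            return (header & 0x3F) + LENGTH_BIAS;
    }

    static int validate(const uint32_t *cmd, uint32_t len)
    {
            (void)cmd; (void)len;
            return 1; /* stand-in for check_cmd()/check_bbstart() */
    }

    static int scan_batch(const uint32_t *batch, uint32_t n_dwords)
    {
            const uint32_t *cmd = batch, *end = batch + n_dwords;

            do {
                    uint32_t length;

                    if (*cmd == 0x05000000u) /* MI_BATCH_BUFFER_END encoding */
                            return 0;

                    length = decode_length(*cmd);
                    if ((uint32_t)(end - cmd) < length)
                            return -1; /* command runs past the batch */

                    if (!validate(cmd, length))
                            return -1;

                    cmd += length;
            } while (cmd < end);

            return -1; /* fell off the end without a BBE */
    }

    int main(void)
    {
            /* first command: length field 1 -> 3 dwords, then BBE */
            uint32_t batch[8] = { 0x00000001, 0, 0, 0x05000000u };

            printf("%d\n", scan_batch(batch, 8)); /* 0 */
            return 0;
    }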
- */ - if (desc->cmd.value == MI_BATCH_BUFFER_START) { - ret = -EACCES; - break; - } - if (desc->flags & CMD_DESC_FIXED) length = desc->length.fixed; else - length = ((*cmd & desc->length.mask) + LENGTH_BIAS); + length = (*cmd & desc->length.mask) + LENGTH_BIAS; if ((batch_end - cmd) < length) { - DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n", - *cmd, - length, - batch_end - cmd); + DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n", + *cmd, + length, + batch_end - cmd); ret = -EINVAL; break; } - if (!check_cmd(engine, desc, cmd, length, is_master)) { + if (!check_cmd(engine, desc, cmd, length)) { ret = -EACCES; break; } + if (desc->cmd.value == MI_BATCH_BUFFER_START) { + ret = check_bbstart(cmd, offset, length, batch_length, + batch_addr, shadow_addr, + jump_whitelist); + break; + } + + if (!IS_ERR_OR_NULL(jump_whitelist)) + __set_bit(offset, jump_whitelist); + cmd += length; + offset += length; if (cmd >= batch_end) { - DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); + DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); ret = -EINVAL; break; } } while (1); - i915_gem_object_unpin_map(shadow_batch_obj); + if (trampoline) { + /* + * With the trampoline, the shadow is executed twice. + * + * 1 - starting at offset 0, in privileged mode + * 2 - starting at offset batch_len, as non-privileged + * + * Only if the batch is valid and safe to execute, do we + * allow the first privileged execution to proceed. If not, + * we terminate the first batch and use the second batchbuffer + * entry to chain to the original unsafe non-privileged batch, + * leaving it to the HW to validate. + */ + *batch_end = MI_BATCH_BUFFER_END; + + if (ret) { + /* Batch unsafe to execute with privileges, cancel! */ + cmd = page_mask_bits(shadow->obj->mm.mapping); + *cmd = MI_BATCH_BUFFER_END; + + /* If batch is unsafe but valid, jump to the original */ + if (ret == -EACCES) { + unsigned int flags; + + flags = MI_BATCH_NON_SECURE_I965; + if (IS_HASWELL(engine->i915)) + flags = MI_BATCH_NON_SECURE_HSW; + + GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7)); + __gen6_emit_bb_start(batch_end, + batch_addr, + flags); + + ret = 0; /* allow execution */ + } + } + + if (shadow_needs_clflush(shadow->obj)) + drm_clflush_virt_range(batch_end, 8); + } + + if (shadow_needs_clflush(shadow->obj)) { + void *ptr = page_mask_bits(shadow->obj->mm.mapping); + + drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr); + } + + if (!IS_ERR_OR_NULL(jump_whitelist)) + kfree(jump_whitelist); + i915_gem_object_unpin_map(shadow->obj); return ret; } @@ -1357,7 +1557,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_uabi_engine(engine, dev_priv) { - if (intel_engine_needs_cmd_parser(engine)) { + if (intel_engine_using_cmd_parser(engine)) { active = true; break; } @@ -1382,6 +1582,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * the parser enabled. * 9. Don't whitelist or handle oacontrol specially, as ownership * for oacontrol state is moving to i915-perf. + * 10. 
Support for Gen9 BCS Parsing */ - return 9; + return 10; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ada57eee914a..d28468eaed57 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -44,6 +44,7 @@ #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" #include "gt/intel_rc6.h" +#include "gt/intel_rps.h" #include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" @@ -60,24 +61,14 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) static int i915_capabilities(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_device_info *info = INTEL_INFO(dev_priv); + struct drm_i915_private *i915 = node_to_i915(m->private); struct drm_printer p = drm_seq_file_printer(m); - const char *msg; - seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); - seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); - seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); + seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); - msg = "n/a"; -#ifdef CONFIG_INTEL_IOMMU - msg = enableddisabled(intel_iommu_gfx_mapped); -#endif - seq_printf(m, "iommu: %s\n", msg); - - intel_device_info_dump_flags(info, &p); - intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); - intel_driver_caps_print(&dev_priv->caps, &p); + intel_device_info_print_static(INTEL_INFO(i915), &p); + intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + intel_driver_caps_print(&i915->caps, &p); kernel_param_lock(THIS_MODULE); i915_params_dump(&i915_modparams, &p); @@ -791,7 +782,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_uncore *uncore = &dev_priv->uncore; - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; intel_wakeref_t wakeref; int ret = 0; @@ -827,23 +818,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); seq_printf(m, "actual GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); + intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); seq_printf(m, "current GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->cur_freq)); + intel_gpu_freq(rps, rps->cur_freq)); seq_printf(m, "max GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, "min GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->min_freq)); + intel_gpu_freq(rps, rps->min_freq)); seq_printf(m, "idle GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->idle_freq)); + intel_gpu_freq(rps, rps->idle_freq)); seq_printf(m, "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(dev_priv, rps->efficient_freq)); + intel_gpu_freq(rps, rps->efficient_freq)); } else if (INTEL_GEN(dev_priv) >= 6) { u32 rp_state_limits; u32 gt_perf_status; @@ -877,7 +868,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) else reqf >>= 25; } - reqf = intel_gpu_freq(dev_priv, reqf); + reqf = intel_gpu_freq(rps, reqf); rpmodectl = I915_READ(GEN6_RP_CONTROL); rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); @@ -890,8 +881,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; - cagf = 
intel_gpu_freq(dev_priv, - intel_get_cagf(dev_priv, rpstat)); + cagf = intel_rps_read_actual_frequency(rps); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); @@ -968,37 +958,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused) max_freq *= (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", - intel_gpu_freq(dev_priv, max_freq)); + intel_gpu_freq(rps, max_freq)); max_freq = (rp_state_cap & 0xff00) >> 8; max_freq *= (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", - intel_gpu_freq(dev_priv, max_freq)); + intel_gpu_freq(rps, max_freq)); max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff; max_freq *= (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", - intel_gpu_freq(dev_priv, max_freq)); + intel_gpu_freq(rps, max_freq)); seq_printf(m, "Max overclocked frequency: %dMHz\n", - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, "Current freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->cur_freq)); + intel_gpu_freq(rps, rps->cur_freq)); seq_printf(m, "Actual freq: %d MHz\n", cagf); seq_printf(m, "Idle freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->idle_freq)); + intel_gpu_freq(rps, rps->idle_freq)); seq_printf(m, "Min freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->min_freq)); + intel_gpu_freq(rps, rps->min_freq)); seq_printf(m, "Boost freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->boost_freq)); + intel_gpu_freq(rps, rps->boost_freq)); seq_printf(m, "Max freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(dev_priv, rps->efficient_freq)); + intel_gpu_freq(rps, rps->efficient_freq)); } else { seq_puts(m, "no P-state info available\n"); } @@ -1011,92 +1001,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) return ret; } -static void i915_instdone_info(struct drm_i915_private *dev_priv, - struct seq_file *m, - struct intel_instdone *instdone) -{ - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - int slice; - int subslice; - - seq_printf(m, "\t\tINSTDONE: 0x%08x\n", - instdone->instdone); - - if (INTEL_GEN(dev_priv) <= 3) - return; - - seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n", - instdone->slice_common); - - if (INTEL_GEN(dev_priv) <= 6) - return; - - for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) - seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", - slice, subslice, instdone->sampler[slice][subslice]); - - for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) - seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", - slice, subslice, instdone->row[slice][subslice]); -} - -static int i915_hangcheck_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_gt *gt = &i915->gt; - struct intel_engine_cs *engine; - intel_wakeref_t wakeref; - enum intel_engine_id id; - - seq_printf(m, "Reset flags: %lx\n", gt->reset.flags); - if (test_bit(I915_WEDGED, &gt->reset.flags)) - seq_puts(m, "\tWedged\n"); - if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) - seq_puts(m, "\tDevice (global) reset in progress\n"); - - if (!i915_modparams.enable_hangcheck) { - seq_puts(m, "Hangcheck disabled\n"); - return 0; - } - - if
(timer_pending(&gt->hangcheck.work.timer)) - seq_printf(m, "Hangcheck active, timer fires in %dms\n", - jiffies_to_msecs(gt->hangcheck.work.timer.expires - - jiffies)); - else if (delayed_work_pending(&gt->hangcheck.work)) - seq_puts(m, "Hangcheck active, work pending\n"); - else - seq_puts(m, "Hangcheck inactive\n"); - - seq_printf(m, "GT active? %s\n", yesno(gt->awake)); - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - for_each_engine(engine, i915, id) { - struct intel_instdone instdone; - - seq_printf(m, "%s: %d ms ago\n", - engine->name, - jiffies_to_msecs(jiffies - - engine->hangcheck.action_timestamp)); - - seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", - (long long)engine->hangcheck.acthd, - intel_engine_get_active_head(engine)); - - intel_engine_get_instdone(engine, &instdone); - - seq_puts(m, "\tinstdone read =\n"); - i915_instdone_info(i915, m, &instdone); - - seq_puts(m, "\tinstdone accu =\n"); - i915_instdone_info(i915, m, - &engine->hangcheck.instdone); - } - } - - return 0; -} - static int ironlake_drpc_info(struct seq_file *m) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -1461,7 +1365,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; unsigned int max_gpu_freq, min_gpu_freq; intel_wakeref_t wakeref; int gpu_freq, ia_freq; @@ -1486,10 +1390,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) GEN6_PCODE_READ_MIN_FREQ_TABLE, &ia_freq, NULL); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", - intel_gpu_freq(dev_priv, (gpu_freq * - (IS_GEN9_BC(dev_priv) || - INTEL_GEN(dev_priv) >= 10 ? - GEN9_FREQ_SCALER : 1))), + intel_gpu_freq(rps, + (gpu_freq * + (IS_GEN9_BC(dev_priv) || + INTEL_GEN(dev_priv) >= 10 ? + GEN9_FREQ_SCALER : 1))), ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); } @@ -1717,22 +1622,7 @@ static const char *rps_power_to_str(unsigned int power) static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 act_freq = rps->cur_freq; - intel_wakeref_t wakeref; - - with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - vlv_punit_get(dev_priv); - act_freq = vlv_punit_read(dev_priv, - PUNIT_REG_GPU_FREQ_STS); - vlv_punit_put(dev_priv); - act_freq = (act_freq >> 8) & 0xff; - } else { - act_freq = intel_get_cagf(dev_priv, - I915_READ(GEN6_RPSTAT1)); - } - } + struct intel_rps *rps = &dev_priv->gt.rps; seq_printf(m, "RPS enabled? %d\n", rps->enabled); seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake)); @@ -1740,17 +1630,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? 
%d\n", READ_ONCE(rps->power.interactive)); seq_printf(m, "Frequency requested %d, actual %d\n", - intel_gpu_freq(dev_priv, rps->cur_freq), - intel_gpu_freq(dev_priv, act_freq)); + intel_gpu_freq(rps, rps->cur_freq), + intel_rps_read_actual_frequency(rps)); seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", - intel_gpu_freq(dev_priv, rps->min_freq), - intel_gpu_freq(dev_priv, rps->min_freq_softlimit), - intel_gpu_freq(dev_priv, rps->max_freq_softlimit), - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->min_freq), + intel_gpu_freq(rps, rps->min_freq_softlimit), + intel_gpu_freq(rps, rps->max_freq_softlimit), + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", - intel_gpu_freq(dev_priv, rps->idle_freq), - intel_gpu_freq(dev_priv, rps->efficient_freq), - intel_gpu_freq(dev_priv, rps->boost_freq)); + intel_gpu_freq(rps, rps->idle_freq), + intel_gpu_freq(rps, rps->efficient_freq), + intel_gpu_freq(rps, rps->boost_freq)); seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); @@ -1866,8 +1756,8 @@ static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log = &dev_priv->gt.uc.guc.log; enum guc_log_buffer_type type; - if (!intel_guc_log_relay_enabled(log)) { - seq_puts(m, "GuC log relay disabled\n"); + if (!intel_guc_log_relay_created(log)) { + seq_puts(m, "GuC log relay not created\n"); return; } @@ -1887,30 +1777,12 @@ static void i915_guc_log_info(struct seq_file *m, static int i915_guc_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_guc *guc = &dev_priv->gt.uc.guc; - struct intel_guc_client *client = guc->execbuf_client; if (!USES_GUC(dev_priv)) return -ENODEV; i915_guc_log_info(m, dev_priv); - if (!USES_GUC_SUBMISSION(dev_priv)) - return 0; - - GEM_BUG_ON(!guc->execbuf_client); - - seq_printf(m, "\nDoorbell map:\n"); - seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); - seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline); - - seq_printf(m, "\nGuC execbuf client @ %p:\n", client); - seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", - client->priority, - client->stage_id, - client->proc_desc_offset); - seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", - client->doorbell_id, client->doorbell_offset); /* Add more as required ... */ return 0; @@ -2054,9 +1926,23 @@ i915_guc_log_relay_write(struct file *filp, loff_t *ppos) { struct intel_guc_log *log = filp->private_data; + int val; + int ret; - intel_guc_log_relay_flush(log); - return cnt; + ret = kstrtoint_from_user(ubuf, cnt, 0, &val); + if (ret < 0) + return ret; + + /* + * Enable and start the guc log relay on value of 1. + * Flush log relay for any other value. 
+ */ + if (val == 1) + ret = intel_guc_log_relay_start(log); + else + intel_guc_log_relay_flush(log); + + return ret ?: cnt; } static int i915_guc_log_relay_release(struct inode *inode, struct file *file) @@ -2194,8 +2080,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) status = "disabled"; seq_printf(m, "PSR mode: %s\n", status); - if (!psr->enabled) + if (!psr->enabled) { + seq_printf(m, "PSR sink not reliable: %s\n", + yesno(psr->sink_not_reliable)); + goto unlock; + } if (psr->psr2_enabled) { val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); @@ -2434,7 +2324,7 @@ out: } static void intel_seq_print_mode(struct seq_file *m, int tabs, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { int i; @@ -2445,59 +2335,35 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs, } static void intel_encoder_info(struct seq_file *m, - struct intel_crtc *intel_crtc, - struct intel_encoder *intel_encoder) + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct drm_crtc *crtc = &intel_crtc->base; - struct intel_connector *intel_connector; - struct drm_encoder *encoder; - - encoder = &intel_encoder->base; - seq_printf(m, "\tencoder %d: type: %s, connectors:\n", - encoder->base.id, encoder->name); - for_each_connector_on_encoder(dev, encoder, intel_connector) { - struct drm_connector *connector = &intel_connector->base; - seq_printf(m, "\t\tconnector %d: type: %s, status: %s", - connector->base.id, - connector->name, - drm_get_connector_status_name(connector->status)); - if (connector->status == connector_status_connected) { - struct drm_display_mode *mode = &crtc->mode; - seq_printf(m, ", mode:\n"); - intel_seq_print_mode(m, 2, mode); - } else { - seq_putc(m, '\n'); - } - } -} + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; -static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct drm_crtc *crtc = &intel_crtc->base; - struct intel_encoder *intel_encoder; - struct drm_plane_state *plane_state = crtc->primary->state; - struct drm_framebuffer *fb = plane_state->fb; + seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n", + encoder->base.base.id, encoder->base.name); - if (fb) - seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", - fb->base.id, plane_state->src_x >> 16, - plane_state->src_y >> 16, fb->width, fb->height); - else - seq_puts(m, "\tprimary plane disabled\n"); - for_each_encoder_on_crtc(dev, crtc, intel_encoder) - intel_encoder_info(m, intel_crtc, intel_encoder); + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + const struct drm_connector_state *conn_state = + connector->state; + + if (conn_state->best_encoder != &encoder->base) + continue; + + seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + } + drm_connector_list_iter_end(&conn_iter); } static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) { - struct drm_display_mode *mode = panel->fixed_mode; + const struct drm_display_mode *mode = panel->fixed_mode; - seq_printf(m, "\tfixed mode:\n"); - intel_seq_print_mode(m, 2, mode); + seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } static void intel_hdcp_info(struct seq_file *m, @@ -2575,10 +2441,12 @@ static void 
intel_connector_info(struct seq_file *m, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_encoder *intel_encoder = intel_connector->encoder; - struct drm_display_mode *mode; + const struct drm_connector_state *conn_state = connector->state; + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + const struct drm_display_mode *mode; - seq_printf(m, "connector %d: type %s, status: %s\n", + seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", connector->base.id, connector->name, drm_get_connector_status_name(connector->status)); @@ -2592,24 +2460,24 @@ static void intel_connector_info(struct seq_file *m, drm_get_subpixel_order_name(connector->display_info.subpixel_order)); seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); - if (!intel_encoder) + if (!encoder) return; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: - if (intel_encoder->type == INTEL_OUTPUT_DP_MST) + if (encoder->type == INTEL_OUTPUT_DP_MST) intel_dp_mst_info(m, intel_connector); else intel_dp_info(m, intel_connector); break; case DRM_MODE_CONNECTOR_LVDS: - if (intel_encoder->type == INTEL_OUTPUT_LVDS) + if (encoder->type == INTEL_OUTPUT_LVDS) intel_lvds_info(m, intel_connector); break; case DRM_MODE_CONNECTOR_HDMIA: - if (intel_encoder->type == INTEL_OUTPUT_HDMI || - intel_encoder->type == INTEL_OUTPUT_DDI) + if (encoder->type == INTEL_OUTPUT_HDMI || + encoder->type == INTEL_OUTPUT_DDI) intel_hdmi_info(m, intel_connector); break; default: @@ -2656,70 +2524,88 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) rotation); } -static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) +static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_plane *intel_plane; + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + const struct drm_framebuffer *fb = plane_state->uapi.fb; + struct drm_format_name_buf format_name; + struct drm_rect src, dst; + char rot_str[48]; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane_state *state; - struct drm_plane *plane = &intel_plane->base; - struct drm_format_name_buf format_name; - char rot_str[48]; + src = drm_plane_state_src(&plane_state->uapi); + dst = drm_plane_state_dest(&plane_state->uapi); - if (!plane->state) { - seq_puts(m, "plane->state is NULL!\n"); - continue; - } + if (fb) + drm_get_format_name(fb->format->format, &format_name); - state = plane->state; + plane_rotation(rot_str, sizeof(rot_str), + plane_state->uapi.rotation); - if (state->fb) { - drm_get_format_name(state->fb->format->format, - &format_name); - } else { - sprintf(format_name.str, "N/A"); - } + seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + fb ? fb->base.id : 0, fb ? format_name.str : "n/a", + fb ? fb->width : 0, fb ? 
fb->height : 0, + DRM_RECT_FP_ARG(&src), + DRM_RECT_ARG(&dst), + rot_str); +} + +static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) +{ + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_format_name_buf format_name; + char rot_str[48]; + + if (!fb) + return; + + drm_get_format_name(fb->format->format, &format_name); + + plane_rotation(rot_str, sizeof(rot_str), + plane_state->hw.rotation); - plane_rotation(rot_str, sizeof(rot_str), state->rotation); - - seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", - plane->base.id, - plane_type(intel_plane->base.type), - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - (state->src_x >> 16), - ((state->src_x & 0xffff) * 15625) >> 10, - (state->src_y >> 16), - ((state->src_y & 0xffff) * 15625) >> 10, - (state->src_w >> 16), - ((state->src_w & 0xffff) * 15625) >> 10, - (state->src_h >> 16), - ((state->src_h & 0xffff) * 15625) >> 10, - format_name.str, - rot_str); + seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + fb->base.id, format_name.str, + fb->width, fb->height, + yesno(plane_state->uapi.visible), + DRM_RECT_FP_ARG(&plane_state->uapi.src), + DRM_RECT_ARG(&plane_state->uapi.dst), + rot_str); +} + +static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", + plane->base.base.id, plane->base.name, + plane_type(plane->base.type)); + intel_plane_uapi_info(m, plane); + intel_plane_hw_info(m, plane); } } -static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) +static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) { - struct intel_crtc_state *pipe_config; - int num_scalers = intel_crtc->num_scalers; + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int num_scalers = crtc->num_scalers; int i; - pipe_config = to_intel_crtc_state(intel_crtc->base.state); - /* Not all platforms have a scaler */ if (num_scalers) { seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", num_scalers, - pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id); + crtc_state->scaler_state.scaler_users, + crtc_state->scaler_state.scaler_id); for (i = 0; i < num_scalers; i++) { - struct intel_scaler *sc = - &pipe_config->scaler_state.scalers[i]; + const struct intel_scaler *sc = + &crtc_state->scaler_state.scalers[i]; seq_printf(m, ", scalers[%d]: use=%s, mode=%x", i, yesno(sc->in_use), sc->mode); @@ -2730,6 +2616,44 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) } } +static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_encoder *encoder; + + seq_printf(m, "[CRTC:%d:%s]:\n", + crtc->base.base.id, crtc->base.name); + + seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n", + yesno(crtc_state->uapi.enable), + yesno(crtc_state->uapi.active), + DRM_MODE_ARG(&crtc_state->uapi.mode)); + + if
(crtc_state->hw.enable) { + seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n", + yesno(crtc_state->hw.active), + DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); + + seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n", + crtc_state->pipe_src_w, crtc_state->pipe_src_h, + yesno(crtc_state->dither), crtc_state->pipe_bpp); + + intel_scaler_info(m, crtc); + } + + for_each_intel_encoder_mask(&dev_priv->drm, encoder, + crtc_state->uapi.encoder_mask) + intel_encoder_info(m, crtc, encoder); + + intel_plane_info(m, crtc); + + seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", + yesno(!crtc->cpu_fifo_underrun_disabled), + yesno(!crtc->pch_fifo_underrun_disabled)); +} + static int i915_display_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -2741,52 +2665,22 @@ static int i915_display_info(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + drm_modeset_lock_all(dev); + seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); - for_each_intel_crtc(dev, crtc) { - struct intel_crtc_state *pipe_config; - - drm_modeset_lock(&crtc->base.mutex, NULL); - pipe_config = to_intel_crtc_state(crtc->base.state); - - seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", - crtc->base.base.id, pipe_name(crtc->pipe), - yesno(pipe_config->base.active), - pipe_config->pipe_src_w, pipe_config->pipe_src_h, - yesno(pipe_config->dither), pipe_config->pipe_bpp); - - if (pipe_config->base.active) { - struct intel_plane *cursor = - to_intel_plane(crtc->base.cursor); - - intel_crtc_info(m, crtc); - - seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", - yesno(cursor->base.state->visible), - cursor->base.state->crtc_x, - cursor->base.state->crtc_y, - cursor->base.state->crtc_w, - cursor->base.state->crtc_h, - cursor->cursor.base); - intel_scaler_info(m, crtc); - intel_plane_info(m, crtc); - } - - seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", - yesno(!crtc->cpu_fifo_underrun_disabled), - yesno(!crtc->pch_fifo_underrun_disabled)); - drm_modeset_unlock(&crtc->base.mutex); - } + for_each_intel_crtc(dev, crtc) + intel_crtc_info(m, crtc); seq_printf(m, "\n"); seq_printf(m, "Connector info\n"); seq_printf(m, "--------------\n"); - mutex_lock(&dev->mode_config.mutex); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) intel_connector_info(m, connector); drm_connector_list_iter_end(&conn_iter); - mutex_unlock(&dev->mode_config.mutex); + + drm_modeset_unlock_all(dev); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); @@ -2822,7 +2716,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_printer p = drm_seq_file_printer(m); - intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p); + intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p); return 0; } @@ -3648,17 +3542,11 @@ i915_drop_caches_get(void *data, u64 *val) return 0; } - static int -i915_drop_caches_set(void *data, u64 val) +gt_drop_caches(struct intel_gt *gt, u64 val) { - struct drm_i915_private *i915 = data; - struct intel_gt *gt = &i915->gt; int ret; - DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", - val, val & DROP_ALL); - if (val & DROP_RESET_ACTIVE && wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) intel_gt_set_wedged(gt); @@ -3681,6 +3569,22 @@ i915_drop_caches_set(void *data, u64 val) if (val & 
DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt)) intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL); + return 0; +} + +static int +i915_drop_caches_set(void *data, u64 val) +{ + struct drm_i915_private *i915 = data; + int ret; + + DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", + val, val & DROP_ALL); + + ret = gt_drop_caches(&i915->gt, val); + if (ret) + return ret; + fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND); @@ -4218,11 +4122,11 @@ static int i915_drrs_ctl_set(void *data, u64 val) crtc_state = to_intel_crtc_state(crtc->base.state); - if (!crtc_state->base.active || + if (!crtc_state->hw.active || !crtc_state->has_drrs) goto out; - commit = crtc_state->base.commit; + commit = crtc_state->uapi.commit; if (commit) { ret = wait_for_completion_interruptible(&commit->hw_done); if (ret) @@ -4234,7 +4138,7 @@ static int i915_drrs_ctl_set(void *data, u64 val) struct intel_encoder *encoder; struct intel_dp *intel_dp; - if (!(crtc_state->base.connector_mask & + if (!(crtc_state->uapi.connector_mask & drm_connector_mask(connector))) continue; @@ -4293,14 +4197,14 @@ i915_fifo_underrun_reset_write(struct file *filp, return ret; crtc_state = to_intel_crtc_state(intel_crtc->base.state); - commit = crtc_state->base.commit; + commit = crtc_state->uapi.commit; if (commit) { ret = wait_for_completion_interruptible(&commit->hw_done); if (!ret) ret = wait_for_completion_interruptible(&commit->flip_done); } - if (!ret && crtc_state->base.active) { + if (!ret && crtc_state->hw.active) { DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n", pipe_name(intel_crtc->pipe)); @@ -4339,7 +4243,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, - {"i915_hangcheck_info", i915_hangcheck_info, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, @@ -4566,7 +4469,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Enabled: %s\n", - yesno(crtc_state->dsc_params.compression_enable)); + yesno(crtc_state->dsc.compression_enable)); seq_printf(m, "DSC_Sink_Support: %s\n", yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); seq_printf(m, "Force_DSC_Enable: %s\n", diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c795fa663bba..59525094d0e3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,8 +61,10 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gem/i915_gem_mman.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_rc6.h" #include "i915_debugfs.h" #include "i915_drv.h" @@ -76,6 +78,7 @@ #include "i915_trace.h" #include "i915_vgpu.h" #include "intel_csr.h" +#include "intel_memory_region.h" #include "intel_pm.h" static struct drm_driver driver; @@ -295,9 +298,6 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915) if (ret) goto cleanup_vga_client; - /* must happen before intel_power_domains_init_hw() on VLV/CHV */ - intel_update_rawclk(i915); - intel_power_domains_init_hw(i915, false); intel_csr_ucode_init(i915); @@ -598,12 +598,10 @@ static int i915_driver_mmio_probe(struct drm_i915_private 
*dev_priv) intel_uc_init_mmio(&dev_priv->gt.uc); - ret = intel_engines_init_mmio(dev_priv); + ret = intel_engines_init_mmio(&dev_priv->gt); if (ret) goto err_uncore; - i915_gem_init_mmio(dev_priv); - return 0; err_uncore: @@ -621,7 +619,6 @@ err_bridge: */ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { - intel_engines_cleanup(dev_priv); intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); @@ -1055,7 +1052,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) */ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); - if (INTEL_GEN(dev_priv) < 9) + if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv)) return; if (IS_GEN9_LP(dev_priv)) @@ -1172,12 +1169,16 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_ggtt; - intel_gt_init_hw_early(dev_priv); + ret = intel_memory_regions_hw_probe(dev_priv); + if (ret) + goto err_ggtt; + + intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt); ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); - goto err_ggtt; + goto err_mem_regions; } pci_set_master(pdev); @@ -1194,7 +1195,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto err_ggtt; + goto err_mem_regions; } } @@ -1212,7 +1213,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto err_ggtt; + goto err_mem_regions; } } @@ -1264,6 +1265,8 @@ err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); pm_qos_remove_request(&dev_priv->pm_qos); +err_mem_regions: + intel_memory_regions_driver_release(dev_priv); err_ggtt: i915_ggtt_driver_release(dev_priv); err_perf: @@ -1393,8 +1396,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) INTEL_INFO(dev_priv)->platform), INTEL_GEN(dev_priv)); - intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p); - intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); + intel_device_info_print_static(INTEL_INFO(dev_priv), &p); + intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) @@ -1476,6 +1479,23 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; + /* + * Check if we support fake LMEM -- for now we only unleash this for + * the live selftests(test-and-exit). + */ +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) { + if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 && + i915_modparams.fake_lmem_start) { + mkwrite_device_info(dev_priv)->memory_regions = + REGION_SMEM | REGION_LMEM | REGION_STOLEN; + mkwrite_device_info(dev_priv)->is_dgfx = true; + GEM_BUG_ON(!HAS_LMEM(dev_priv)); + GEM_BUG_ON(!IS_DGFX(dev_priv)); + } + } +#endif + ret = pci_enable_device(pdev); if (ret) goto out_fini; @@ -1510,6 +1530,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_hw: i915_driver_hw_remove(dev_priv); + intel_memory_regions_driver_release(dev_priv); i915_ggtt_driver_release(dev_priv); out_cleanup_mmio: i915_driver_mmio_release(dev_priv); @@ -1548,10 +1569,7 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_driver_modeset_remove(i915); - /* Free error state after interrupts are fully disabled. 
*/ - cancel_delayed_work_sync(&i915->gt.hangcheck.work); i915_reset_error_state(i915); - i915_gem_driver_remove(i915); intel_power_domains_driver_remove(i915); @@ -1570,6 +1588,7 @@ static void i915_driver_release(struct drm_device *dev) i915_gem_driver_release(dev_priv); + intel_memory_regions_driver_release(dev_priv); i915_ggtt_driver_release(dev_priv); i915_driver_mmio_release(dev_priv); @@ -1797,9 +1816,8 @@ static int i915_drm_resume(struct drm_device *dev) int ret; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - intel_gt_pm_disable(&dev_priv->gt); - i915_gem_sanitize(dev_priv); + intel_gt_sanitize(&dev_priv->gt, true); ret = i915_ggtt_enable_hw(dev_priv); if (ret) @@ -1928,12 +1946,8 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_display_power_resume_early(dev_priv); - intel_gt_pm_disable(&dev_priv->gt); - intel_power_domains_resume(dev_priv); - intel_gt_sanitize(&dev_priv->gt, true); - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; @@ -2646,18 +2660,12 @@ const struct dev_pm_ops i915_pm_ops = { .runtime_resume = intel_runtime_resume, }; -static const struct vm_operations_struct i915_gem_vm_ops = { - .fault = i915_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations i915_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, - .mmap = drm_gem_mmap, + .mmap = i915_gem_mmap, .poll = drm_poll, .read = drm_read, .compat_ioctl = i915_compat_ioctl, @@ -2704,7 +2712,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), @@ -2746,7 +2754,6 @@ static struct drm_driver driver = { .gem_close_object = i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, - .gem_vm_ops = &i915_gem_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -2757,7 +2764,8 @@ static struct drm_driver driver = { .get_scanout_position = i915_get_crtc_scanoutpos, .dumb_create = i915_gem_dumb_create, - .dumb_map_offset = i915_gem_mmap_gtt, + .dumb_map_offset = i915_gem_dumb_mmap_offset, + .ioctls = i915_ioctls, .num_ioctls = ARRAY_SIZE(i915_ioctls), .fops = &i915_driver_fops, @@ -2768,7 +2776,3 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_drm.c" -#endif diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8882c0908c3b..d05a968227f7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -101,6 +101,8 @@ #include "i915_vma.h" #include "i915_irq.h" +#include "intel_region_lmem.h" + #include "intel_gvt.h" /* General customization: @@ -108,8 +110,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20191021" -#define DRIVER_TIMESTAMP 1571651766 
+#define DRIVER_DATE "20191223" +#define DRIVER_TIMESTAMP 1577120893 struct drm_i915_gem_object; @@ -271,11 +273,11 @@ struct drm_i915_display_funcs { int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state); int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state); void (*initial_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state); + struct intel_crtc *crtc); void (*atomic_update_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state); + struct intel_crtc *crtc); void (*optimize_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state); + struct intel_crtc *crtc); int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); @@ -288,10 +290,10 @@ struct drm_i915_display_funcs { struct intel_initial_plane_config *); int (*crtc_compute_clock)(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); - void (*crtc_enable)(struct intel_crtc_state *pipe_config, - struct intel_atomic_state *old_state); - void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, - struct intel_atomic_state *old_state); + void (*crtc_enable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*crtc_disable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); void (*commit_modeset_enables)(struct intel_atomic_state *state); void (*commit_modeset_disables)(struct intel_atomic_state *state); void (*audio_codec_enable)(struct intel_encoder *encoder, @@ -364,7 +366,6 @@ struct intel_fbc { unsigned threshold; unsigned int possible_framebuffer_bits; unsigned int busy_bits; - unsigned int visible_pipes_mask; struct intel_crtc *crtc; struct drm_mm_node compressed_fb; @@ -372,8 +373,8 @@ struct intel_fbc { bool false_color; - bool enabled; bool active; + bool activated; bool flip_pending; bool underrun_detected; @@ -385,9 +386,6 @@ struct intel_fbc { * these problems. */ struct intel_fbc_state_cache { - struct i915_vma *vma; - unsigned long flags; - struct { unsigned int mode_flags; u32 hsw_bdw_pixel_rate; @@ -416,6 +414,8 @@ struct intel_fbc { const struct drm_format_info *format; unsigned int stride; } fb; + u16 gen9_wa_cfb_stride; + s8 fence_id; } state_cache; /* @@ -426,9 +426,6 @@ struct intel_fbc { * are supposed to read from it in order to program the registers. */ struct intel_fbc_reg_params { - struct i915_vma *vma; - unsigned long flags; - struct { enum pipe pipe; enum i9xx_plane_id i9xx_plane; @@ -441,7 +438,9 @@ struct intel_fbc { } fb; int cfb_size; - unsigned int gen9_wa_cfb_stride; + u16 gen9_wa_cfb_stride; + s8 fence_id; + bool plane_visible; } params; const char *no_fbc_reason; @@ -543,94 +542,6 @@ struct i915_suspend_saved_registers { struct vlv_s0ix_state; -struct intel_rps_ei { - ktime_t ktime; - u32 render_c0; - u32 media_c0; -}; - -struct intel_rps { - struct mutex lock; /* protects enabling and the worker */ - - /* - * work, interrupts_enabled and pm_iir are protected by - * dev_priv->irq_lock - */ - struct work_struct work; - bool interrupts_enabled; - u32 pm_iir; - - /* PM interrupt bits that should never be masked */ - u32 pm_intrmsk_mbz; - - /* Frequencies are stored in potentially platform dependent multiples. - * In other words, *_freq needs to be multiplied by X to be interesting. - * Soft limits are those which are used for the dynamic reclocking done - * by the driver (raise frequencies under heavy loads, and lower for - * lighter loads). 
Hard limits are those imposed by the hardware. - * - * A distinction is made for overclocking, which is never enabled by - * default, and is considered to be above the hard limit if it's - * possible at all. - */ - u8 cur_freq; /* Current frequency (cached, may not == HW) */ - u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ - u8 max_freq_softlimit; /* Max frequency permitted by the driver */ - u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ - u8 min_freq; /* AKA RPn. Minimum frequency */ - u8 boost_freq; /* Frequency to request when wait boosting */ - u8 idle_freq; /* Frequency to request when we are idle */ - u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ - u8 rp1_freq; /* "less than" RP0 power/freqency */ - u8 rp0_freq; /* Non-overclocked max frequency. */ - u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ - - int last_adj; - - struct { - struct mutex mutex; - - enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; - unsigned int interactive; - - u8 up_threshold; /* Current %busy required to uplock */ - u8 down_threshold; /* Current %busy required to downclock */ - } power; - - bool enabled; - atomic_t num_waiters; - atomic_t boosts; - - /* manual wa residency calculations */ - struct intel_rps_ei ei; -}; - -struct intel_gen6_power_mgmt { - struct intel_rps rps; -}; - -/* defined intel_pm.c */ -extern spinlock_t mchdev_lock; - -struct intel_ilk_power_mgmt { - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; - u64 last_time2; - unsigned long gfx_power; - u8 corr; - - int c_m; - int r_t; -}; - #define MAX_L3_SLICES 2 struct intel_l3_parity { u32 *remap_info[MAX_L3_SLICES]; @@ -707,19 +618,18 @@ struct i915_gem_mm { #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ +/* Amount of SAGV/QGV points, BSpec precisely defines this */ +#define I915_NUM_QGV_POINTS 8 + struct ddi_vbt_port_info { /* Non-NULL if port present. */ const struct child_device_config *child; int max_tmds_clock; - /* - * This is an index in the HDMI/DVI DDI buffer translation table. - * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't - * populate this field. - */ -#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff + /* This is an index in the HDMI/DVI DDI buffer translation table. 
*/ u8 hdmi_level_shift; + u8 hdmi_level_shift_set:1; u8 supports_dvi:1; u8 supports_hdmi:1; @@ -810,8 +720,7 @@ struct intel_vbt_data { int crt_ddc_pin; - int child_dev_num; - struct child_device_config *child_dev; + struct list_head display_devices; struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; struct sdvo_device_mapping sdvo_mappings[2]; @@ -977,6 +886,10 @@ struct intel_cdclk_state { u8 voltage_level; }; +struct i915_selftest_stash { + atomic_t counter; +}; + struct drm_i915_private { struct drm_device drm; @@ -1042,9 +955,6 @@ struct drm_i915_private { struct pci_dev *bridge_dev; - /* Context used internally to idle the GPU and setup initial state */ - struct i915_gem_context *kernel_context; - struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct rb_root uabi_engines; @@ -1067,7 +977,6 @@ struct drm_i915_private { u32 irq_mask; u32 de_irq_mask[I915_MAX_PIPES]; }; - u32 pm_rps_events; u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; @@ -1097,13 +1006,14 @@ struct drm_i915_private { unsigned int fdi_pll_freq; unsigned int czclk_freq; + /* + * For reading holding any crtc lock is sufficient, + * for writing must hold all of them. + */ struct { /* * The current logical cdclk state. * See intel_atomic_state.cdclk.logical - * - * For reading holding any crtc lock is sufficient, - * for writing must hold all of them. */ struct intel_cdclk_state logical; /* @@ -1173,6 +1083,10 @@ struct drm_i915_private { */ struct mutex dpll_lock; + /* + * For reading active_pipes, min_cdclk, min_voltage_level holding + * any crtc lock is sufficient, for writing must hold all of them. + */ u8 active_pipes; /* minimum acceptable cdclk for each pipe */ int min_cdclk[I915_MAX_PIPES]; @@ -1202,13 +1116,6 @@ struct drm_i915_private { */ u32 edram_size_mb; - /* gen6+ GT PM state */ - struct intel_gen6_power_mgmt gt_pm; - - /* ilk-only ips/rps state. Everything in here is protected by the global - * mchdev_lock in intel_pm.c */ - struct intel_ilk_power_mgmt ips; - struct i915_power_domains power_domains; struct i915_psr psr; @@ -1322,7 +1229,8 @@ struct drm_i915_private { } dram_info; struct intel_bw_info { - unsigned int deratedbw[3]; /* for each QGV point */ + /* for each QGV point */ + unsigned int deratedbw[I915_NUM_QGV_POINTS]; u8 num_qgv_points; u8 num_planes; } max_bw[6]; @@ -1337,8 +1245,6 @@ struct drm_i915_private { struct intel_gt gt; struct { - struct notifier_block pm_notifier; - struct i915_gem_contexts { spinlock_t lock; /* locks list */ struct list_head list; @@ -1348,6 +1254,8 @@ struct drm_i915_private { } contexts; } gem; + u8 pch_ssc_use; + /* For i915gm/i945gm vblank irq workaround */ u8 vblank_enabled; @@ -1373,6 +1281,8 @@ struct drm_i915_private { /* Mutex to protect the above hdcp component related values. */ struct mutex hdcp_comp_mutex; + I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;) + /* * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * will be rejected. Instead look for a better place. 
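The i915_drv.h hunks that follow add IS_DGFX() and HAS_LMEM(), which key device behaviour off the memory_regions bitmask in the static device info, the same mask the fake-LMEM probe path earlier in this diff rewrites to REGION_SMEM | REGION_LMEM | REGION_STOLEN. A minimal standalone sketch of that flag-mask pattern, for illustration only: every identifier below is a mock, and only the REGION_* names and the has-region macro shape mirror the diff.

#include <stdio.h>

/* Mock region flags; the bit values are illustrative, not the driver's. */
#define REGION_SMEM	(1u << 0)
#define REGION_LMEM	(1u << 1)
#define REGION_STOLEN	(1u << 2)

/* Mock stand-in for the driver's static device-info structure. */
struct mock_device_info {
	unsigned int memory_regions;
};

/* Same shape as the HAS_REGION()/HAS_LMEM() helpers added in the hunks below. */
#define MOCK_HAS_REGION(info, i)	((info)->memory_regions & (i))
#define MOCK_HAS_LMEM(info)		MOCK_HAS_REGION(info, REGION_LMEM)

int main(void)
{
	/* Integrated part: system memory and stolen memory only. */
	struct mock_device_info igfx = {
		.memory_regions = REGION_SMEM | REGION_STOLEN,
	};
	/* Discrete-like part: adds local memory (LMEM). */
	struct mock_device_info dgfx = {
		.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN,
	};

	printf("igfx has LMEM: %s\n", MOCK_HAS_LMEM(&igfx) ? "yes" : "no");
	printf("dgfx has LMEM: %s\n", MOCK_HAS_LMEM(&dgfx) ? "yes" : "no");
	return 0;
}

Keeping the supported regions as a single mask in one field is what lets the selftest-only fake-LMEM path flip a device to discrete by rewriting that field alone.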
@@ -1544,6 +1454,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, } #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) +#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx) #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) @@ -1700,9 +1611,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define VEBOX_MASK(dev_priv) \ ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) +/* + * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution + * All later gens can run the final buffer from the ppgtt + */ +#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7) + #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) +#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) @@ -1735,10 +1653,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) +#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ + (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) + /* WaRsDisableCoarsePowerGating:skl,cnl */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_CANNONLAKE(dev_priv) || \ - IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) + IS_GEN_RANGE(dev_priv, 9, 10) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ @@ -1781,6 +1701,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) +#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) @@ -1846,7 +1767,6 @@ void i915_driver_remove(struct drm_i915_private *i915); int i915_resume_switcheroo(struct drm_i915_private *i915); int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); -void intel_engine_init_hangcheck(struct intel_engine_cs *engine); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) @@ -1865,7 +1785,6 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, /* i915_gem.c */ int i915_gem_init_userptr(struct drm_i915_private *dev_priv); void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); -void i915_gem_sanitize(struct drm_i915_private *i915); void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); @@ -1920,6 +1839,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, int i915_gem_object_unbind(struct drm_i915_gem_object *obj, unsigned long flags); #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) +#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); @@ -1932,9 +1852,6 @@ i915_mutex_lock_interruptible(struct drm_device *dev) int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, - u32 handle, u64 *offset); -int 
i915_gem_mmap_gtt_version(void); int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); @@ -1949,7 +1866,6 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, return atomic_read(&error->reset_engine_count[engine->uabi_class]); } -void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); void i915_gem_driver_register(struct drm_i915_private *i915); void i915_gem_driver_unregister(struct drm_i915_private *i915); @@ -1958,7 +1874,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv); void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); -vm_fault_t i915_gem_fault(struct vm_fault *vmf); int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); @@ -2002,9 +1917,6 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm); -void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915); -int i915_gem_init_memory_regions(struct drm_i915_private *i915); - /* i915_gem_internal.c */ struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *dev_priv, @@ -2031,11 +1943,12 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, - struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, - u32 batch_start_offset, - u32 batch_len, - bool is_master); + struct i915_vma *batch, + u32 batch_offset, + u32 batch_length, + struct i915_vma *shadow, + bool trampoline); +#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 /* intel_device_info.c */ static inline struct intel_device_info * @@ -2117,4 +2030,10 @@ i915_coherent_map_type(struct drm_i915_private *i915) return HAS_LLC(i915) ? 
I915_MAP_WB : I915_MAP_WC; } +static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc) +{ + return intel_guc_is_submission_supported(guc) && + intel_guc_is_running(guc); +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dd0a3271b4e2..9ddcf17230e6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -44,17 +44,13 @@ #include "gem/i915_gem_clflush.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" -#include "gem/i915_gem_pm.h" +#include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" -#include "gt/intel_mocs.h" -#include "gt/intel_reset.h" -#include "gt/intel_renderstate.h" #include "gt/intel_workarounds.h" #include "i915_drv.h" -#include "i915_scatterlist.h" #include "i915_trace.h" #include "i915_vgpu.h" @@ -116,33 +112,65 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, int i915_gem_object_unbind(struct drm_i915_gem_object *obj, unsigned long flags) { - struct i915_vma *vma; + struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm; LIST_HEAD(still_in_list); - int ret = 0; + intel_wakeref_t wakeref; + struct i915_vma *vma; + int ret; + + if (!atomic_read(&obj->bind_count)) + return 0; + + /* + * As some machines use ACPI to handle runtime-resume callbacks, and + * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex + * as they are required by the shrinker. Ergo, we wake the device up + * first just in case. + */ + wakeref = intel_runtime_pm_get(rpm); +try_again: + ret = 0; spin_lock(&obj->vma.lock); while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, struct i915_vma, obj_link))) { struct i915_address_space *vm = vma->vm; - ret = -EBUSY; + list_move_tail(&vma->obj_link, &still_in_list); + if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) + continue; + + ret = -EAGAIN; if (!i915_vm_tryopen(vm)) break; - list_move_tail(&vma->obj_link, &still_in_list); + /* Prevent vma being freed by i915_vma_parked as we unbind */ + vma = __i915_vma_get(vma); spin_unlock(&obj->vma.lock); - if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || - !i915_vma_is_active(vma)) - ret = i915_vma_unbind(vma); + if (vma) { + ret = -EBUSY; + if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || + !i915_vma_is_active(vma)) + ret = i915_vma_unbind(vma); + + __i915_vma_put(vma); + } i915_vm_close(vm); spin_lock(&obj->vma.lock); } - list_splice(&still_in_list, &obj->vma.list); + list_splice_init(&still_in_list, &obj->vma.list); spin_unlock(&obj->vma.lock); + if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) { + rcu_barrier(); /* flush the i915_vm_release() */ + goto try_again; + } + + intel_runtime_pm_put(rpm, wakeref); + return ret; } @@ -158,7 +186,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, * We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
*/ - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); if (copy_from_user(vaddr, user_data, args->size)) return -EFAULT; @@ -166,7 +194,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, drm_clflush_virt_range(vaddr, args->size); intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); return 0; } @@ -586,7 +614,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, goto out_unpin; } - intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); user_data = u64_to_user_ptr(args->data_ptr); offset = args->offset; @@ -628,11 +656,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, user_data += page_length; offset += page_length; } - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + + intel_gt_flush_ggtt_writes(ggtt->vm.gt); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); i915_gem_object_unlock_fence(obj, fence); out_unpin: - intel_gt_flush_ggtt_writes(ggtt->vm.gt); if (drm_mm_node_allocated(&node)) { ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(ggtt, &node); @@ -718,7 +747,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, offset = 0; } - intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); i915_gem_object_unlock_fence(obj, fence); return ret; @@ -852,7 +881,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) list_for_each_entry_safe(obj, on, &i915->ggtt.userfault_list, userfault_link) - __i915_gem_object_release_mmap(obj); + __i915_gem_object_release_mmap_gtt(obj); /* * The fence will be lost when the device powers down. If any were @@ -889,8 +918,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_address_space *vm = &dev_priv->ggtt.vm; + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_ggtt *ggtt = &i915->ggtt; struct i915_vma *vma; int ret; @@ -899,17 +928,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, if (flags & PIN_MAPPABLE && (!view || view->type == I915_GGTT_VIEW_NORMAL)) { - /* If the required space is larger than the available + /* + * If the required space is larger than the available * aperture, we will not be able to find a slot for the * object and unbinding the object now will be in * vain. Worse, doing so may cause us to ping-pong * the object in and out of the Global GTT and * waste a lot of cycles under the mutex. */ - if (obj->base.size > dev_priv->ggtt.mappable_end) + if (obj->base.size > ggtt->mappable_end) return ERR_PTR(-E2BIG); - /* If NONBLOCK is set the caller is optimistically + /* + * If NONBLOCK is set the caller is optimistically * trying to cache the full object within the mappable * aperture, and *must* have a fallback in place for * situations where we cannot bind the object. We @@ -925,11 +956,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, * we could try to minimise harm to others.
*/ if (flags & PIN_NONBLOCK && - obj->base.size > dev_priv->ggtt.mappable_end / 2) + obj->base.size > ggtt->mappable_end / 2) return ERR_PTR(-ENOSPC); } - vma = i915_vma_instance(obj, vm, view); + vma = i915_vma_instance(obj, &ggtt->vm, view); if (IS_ERR(vma)) return vma; @@ -939,7 +970,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOSPC); if (flags & PIN_MAPPABLE && - vma->fence_size > dev_priv->ggtt.mappable_end / 2) + vma->fence_size > ggtt->mappable_end / 2) return ERR_PTR(-ENOSPC); } @@ -949,9 +980,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, } if (vma->fence && !i915_gem_object_is_tiled(obj)) { - mutex_lock(&vma->vm->mutex); + mutex_lock(&ggtt->vm.mutex); ret = i915_vma_revoke_fence(vma); - mutex_unlock(&vma->vm->mutex); + mutex_unlock(&ggtt->vm.mutex); if (ret) return ERR_PTR(ret); } @@ -1037,184 +1068,6 @@ out: return err; } -void i915_gem_sanitize(struct drm_i915_private *i915) -{ - intel_wakeref_t wakeref; - - GEM_TRACE("\n"); - - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - - /* - * As we have just resumed the machine and woken the device up from - * deep PCI sleep (presumably D3_cold), assume the HW has been reset - * back to defaults, recovering from whatever wedged state we left it - * in and so worth trying to use the device once more. - */ - if (intel_gt_is_wedged(&i915->gt)) - intel_gt_unset_wedged(&i915->gt); - - /* - * If we inherit context state from the BIOS or earlier occupants - * of the GPU, the GPU may be in an inconsistent state when we - * try to take over. The only way to remove the earlier state - * is by resetting. However, resetting on earlier gen is tricky as - * it may impact the display and we are uncertain about the stability - * of the reset, so this could be applied to even earlier gen. - */ - intel_gt_sanitize(&i915->gt, false); - - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); -} - -static int __intel_engines_record_defaults(struct drm_i915_private *i915) -{ - struct i915_request *requests[I915_NUM_ENGINES] = {}; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - /* - * As we reset the gpu during very early sanitisation, the current - * register state on the GPU should reflect its defaults values. - * We load a context onto the hw (with restore-inhibit), then switch - * over to a second context to save that default register state. We - * can then prime every new context with that state so they all start - * from the same default HW values. - */ - - for_each_engine(engine, i915, id) { - struct intel_context *ce; - struct i915_request *rq; - - /* We must be able to switch to something! */ - GEM_BUG_ON(!engine->kernel_context); - engine->serial++; /* force the kernel context switch */ - - ce = intel_context_create(i915->kernel_context, engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - intel_context_put(ce); - goto out; - } - - err = intel_engine_emit_ctx_wa(rq); - if (err) - goto err_rq; - - err = intel_renderstate_emit(rq); - if (err) - goto err_rq; - -err_rq: - requests[id] = i915_request_get(rq); - i915_request_add(rq); - if (err) - goto out; - } - - /* Flush the default context image to memory, and enable powersaving. 
*/ - if (!i915_gem_load_power_context(i915)) { - err = -EIO; - goto out; - } - - for (id = 0; id < ARRAY_SIZE(requests); id++) { - struct i915_request *rq; - struct i915_vma *state; - void *vaddr; - - rq = requests[id]; - if (!rq) - continue; - - /* We want to be able to unbind the state from the GGTT */ - GEM_BUG_ON(intel_context_is_pinned(rq->hw_context)); - - state = rq->hw_context->state; - if (!state) - continue; - - /* - * As we will hold a reference to the logical state, it will - * not be torn down with the context, and importantly the - * object will hold onto its vma (making it possible for a - * stray GTT write to corrupt our defaults). Unmap the vma - * from the GTT to prevent such accidents and reclaim the - * space. - */ - err = i915_vma_unbind(state); - if (err) - goto out; - - i915_gem_object_lock(state->obj); - err = i915_gem_object_set_to_cpu_domain(state->obj, false); - i915_gem_object_unlock(state->obj); - if (err) - goto out; - - i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC); - - /* Check we can acquire the image of the context state */ - vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto out; - } - - rq->engine->default_state = i915_gem_object_get(state->obj); - i915_gem_object_unpin_map(state->obj); - } - -out: - /* - * If we have to abandon now, we expect the engines to be idle - * and ready to be torn-down. The quickest way we can accomplish - * this is by declaring ourselves wedged. - */ - if (err) - intel_gt_set_wedged(&i915->gt); - - for (id = 0; id < ARRAY_SIZE(requests); id++) { - struct intel_context *ce; - struct i915_request *rq; - - rq = requests[id]; - if (!rq) - continue; - - ce = rq->hw_context; - i915_request_put(rq); - intel_context_put(ce); - } - return err; -} - -static int intel_engines_verify_workarounds(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - return 0; - - for_each_engine(engine, i915, id) { - if (intel_engine_verify_workarounds(engine, "load")) - err = -EIO; - } - - return err; -} - int i915_gem_init(struct drm_i915_private *dev_priv) { int ret; @@ -1224,8 +1077,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv) mkwrite_device_info(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; - intel_timelines_init(dev_priv); - ret = i915_gem_init_userptr(dev_priv); if (ret) return ret; @@ -1233,53 +1084,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv) intel_uc_fetch_firmwares(&dev_priv->gt.uc); intel_wopcm_init(&dev_priv->wopcm); - /* This is just a security blanket to placate dragons. - * On some systems, we very sporadically observe that the first TLBs - * used by the CS may be stale, despite us poking the TLB reset. If - * we hold the forcewake during initialisation these problems - * just magically go away. 
- */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - ret = i915_init_ggtt(dev_priv); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_unlock; } - intel_gt_init(&dev_priv->gt); - - ret = intel_engines_setup(dev_priv); - if (ret) { - GEM_BUG_ON(ret == -EIO); - goto err_unlock; - } - - ret = i915_gem_init_contexts(dev_priv); - if (ret) { - GEM_BUG_ON(ret == -EIO); - goto err_scratch; - } - - ret = intel_engines_init(dev_priv); - if (ret) { - GEM_BUG_ON(ret == -EIO); - goto err_context; - } - - intel_init_gt_powersave(dev_priv); - - intel_uc_init(&dev_priv->gt.uc); - - ret = intel_gt_init_hw(&dev_priv->gt); - if (ret) - goto err_uc_init; - - /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_gt_resume(&dev_priv->gt); - if (ret) - goto err_init_hw; - /* * Despite its name intel_init_clock_gating applies both display * clock gating workarounds; GT mmio workarounds and the occasional @@ -1291,23 +1101,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv) */ intel_init_clock_gating(dev_priv); - ret = intel_engines_verify_workarounds(dev_priv); - if (ret) - goto err_gt; - - ret = __intel_engines_record_defaults(dev_priv); - if (ret) - goto err_gt; - - ret = i915_inject_load_error(dev_priv, -ENODEV); - if (ret) - goto err_gt; - - ret = i915_inject_load_error(dev_priv, -EIO); + ret = intel_gt_init(&dev_priv->gt); if (ret) - goto err_gt; - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + goto err_unlock; return 0; @@ -1317,31 +1113,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * HW as irreversibly wedged, but keep enough state around that the * driver doesn't explode during runtime. */ -err_gt: - intel_gt_set_wedged_on_init(&dev_priv->gt); - i915_gem_suspend(dev_priv); - i915_gem_suspend_late(dev_priv); - - i915_gem_drain_workqueue(dev_priv); -err_init_hw: - intel_uc_fini_hw(&dev_priv->gt.uc); -err_uc_init: - if (ret != -EIO) { - intel_uc_fini(&dev_priv->gt.uc); - intel_engines_cleanup(dev_priv); - } -err_context: - if (ret != -EIO) - i915_gem_driver_release__contexts(dev_priv); -err_scratch: - intel_gt_driver_release(&dev_priv->gt); err_unlock: - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + i915_gem_drain_workqueue(dev_priv); if (ret != -EIO) { intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); - intel_timelines_fini(dev_priv); } if (ret == -EIO) { @@ -1385,38 +1162,30 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) i915_gem_suspend_late(dev_priv); intel_gt_driver_remove(&dev_priv->gt); + dev_priv->uabi_engines = RB_ROOT; /* Flush any outstanding unpin_work.
*/ i915_gem_drain_workqueue(dev_priv); - intel_uc_fini_hw(&dev_priv->gt.uc); - intel_uc_fini(&dev_priv->gt.uc); - i915_gem_drain_freed_objects(dev_priv); } void i915_gem_driver_release(struct drm_i915_private *dev_priv) { - intel_engines_cleanup(dev_priv); - i915_gem_driver_release__contexts(dev_priv); intel_gt_driver_release(&dev_priv->gt); intel_wa_list_free(&dev_priv->gt_wa_list); intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); - intel_timelines_fini(dev_priv); + + i915_gem_driver_release__contexts(dev_priv); i915_gem_drain_freed_objects(dev_priv); WARN_ON(!list_empty(&dev_priv->gem.contexts.list)); } -void i915_gem_init_mmio(struct drm_i915_private *i915) -{ - i915_gem_sanitize(i915); -} - static void i915_gem_init__mm(struct drm_i915_private *i915) { spin_lock_init(&i915->mm.obj_lock); @@ -1432,7 +1201,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) void i915_gem_init_early(struct drm_i915_private *dev_priv) { i915_gem_init__mm(dev_priv); - i915_gem_init__pm(dev_priv); + i915_gem_init__contexts(dev_priv); spin_lock_init(&dev_priv->fb_tracking.lock); } diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 05d23d013b68..1753c84d6c0d 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -30,6 +30,8 @@ #include <drm/drm_drv.h> +#include "i915_utils.h" + struct drm_i915_private; #ifdef CONFIG_DRM_I915_DEBUG_GEM @@ -39,6 +41,7 @@ struct drm_i915_private; #define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \ GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \ __func__, __LINE__, __stringify(condition)); \ + GEM_TRACE_DUMP(); \ BUG(); \ } \ } while(0) @@ -68,9 +71,10 @@ struct drm_i915_private; pr_err(__VA_ARGS__); \ trace_printk(__VA_ARGS__); \ } while (0) -#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL) +#define GEM_TRACE_DUMP() \ + do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0) #define GEM_TRACE_DUMP_ON(expr) \ - do { if (expr) ftrace_dump(DUMP_ALL); } while (0) + do { if (expr) GEM_TRACE_DUMP(); } while (0) #else #define GEM_TRACE(...) do { } while (0) #define GEM_TRACE_ERR(...) 
do { } while (0) @@ -112,18 +116,4 @@ static inline bool __tasklet_is_scheduled(struct tasklet_struct *t) return test_bit(TASKLET_STATE_SCHED, &t->state); } -static inline void cancel_timer(struct timer_list *t) -{ - if (!READ_ONCE(t->expires)) - return; - - del_timer(t); - WRITE_ONCE(t->expires, 0); -} - -static inline bool timer_expired(const struct timer_list *t) -{ - return READ_ONCE(t->expires) && !timer_pending(t); -} - #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 7e62c310290f..0697bedebeef 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -359,9 +359,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, */ int i915_gem_evict_vm(struct i915_address_space *vm) { - struct list_head eviction_list; - struct i915_vma *vma, *next; - int ret; + int ret = 0; lockdep_assert_held(&vm->mutex); trace_i915_gem_evict_vm(vm); @@ -377,21 +375,30 @@ int i915_gem_evict_vm(struct i915_address_space *vm) return ret; } - INIT_LIST_HEAD(&eviction_list); - list_for_each_entry(vma, &vm->bound_list, vm_link) { - if (i915_vma_is_pinned(vma)) - continue; + do { + struct i915_vma *vma, *vn; + LIST_HEAD(eviction_list); - __i915_vma_pin(vma); - list_add(&vma->evict_link, &eviction_list); - } + list_for_each_entry(vma, &vm->bound_list, vm_link) { + if (i915_vma_is_pinned(vma)) + continue; + + __i915_vma_pin(vma); + list_add(&vma->evict_link, &eviction_list); + } + if (list_empty(&eviction_list)) + break; + + ret = 0; + list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) { + __i915_vma_unpin(vma); + if (ret == 0) + ret = __i915_vma_unbind(vma); + if (ret != -EINTR) /* "Get me out of here!" */ + ret = 0; + } + } while (ret == 0); - ret = 0; - list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { - __i915_vma_unpin(vma); - if (ret == 0) - ret = __i915_vma_unbind(vma); - } return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 321189e1b0f2..71efccfde122 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -846,8 +846,10 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt) detect_bit_6_swizzle(ggtt); - if (INTEL_GEN(i915) >= 7 && - !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))) + if (!i915_ggtt_has_aperture(ggtt)) + num_fences = 0; + else if (INTEL_GEN(i915) >= 7 && + !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))) num_fences = 32; else if (INTEL_GEN(i915) >= 4 || IS_I945G(i915) || IS_I945GM(i915) || diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3148d5946b63..1efe58ad0ce9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -53,6 +53,8 @@ #define DBG(...) #endif +#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */ + /** * DOC: Global GTT views * @@ -123,6 +125,16 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) { struct intel_uncore *uncore = ggtt->vm.gt->uncore; + spin_lock_irq(&uncore->lock); + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6); + spin_unlock_irq(&uncore->lock); +} + +static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + /* * Note that as an uncached mmio write, this will flush the * WCB of the writes into the GGTT before it triggers the invalidate. 
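The i915_gem_evict_vm() rework in the hunk above replaces the old single eviction pass with a retry loop: every unpinned vma on the vm's bound list is temporarily pinned and collected onto a private eviction list, then the pins are dropped and each vma unbound, and the whole sequence repeats until a pass collects nothing, with only -EINTR allowed to abort early. The following is a minimal userspace sketch of that pattern, under the assumption that a simple singly-linked list and a temp_pins counter stand in for the driver's real vma/locking machinery; struct vma, vma_unbind() and evict_vm() here are illustrative stand-ins, not the i915 API.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct vma {
    struct vma *next;   /* link in the vm's bound list */
    bool pinned;        /* long-term pin held by a user */
    bool bound;
    int temp_pins;      /* short-lived pin taken while on the eviction list */
};

static int vma_unbind(struct vma *v)
{
    if (v->pinned || v->temp_pins)
        return -EBUSY;  /* still in use; caller retries on the next pass */
    v->bound = false;
    return 0;
}

static int evict_vm(struct vma *bound_list)
{
    int ret = 0;

    do {
        struct vma *evict[16];
        int n = 0, i;

        /* Pass 1: pin every evictable vma so it cannot vanish under us. */
        for (struct vma *v = bound_list; v; v = v->next) {
            if (!v->bound || v->pinned)
                continue;
            if (n == (int)(sizeof(evict) / sizeof(evict[0])))
                break;
            v->temp_pins++;
            evict[n++] = v;
        }
        if (n == 0)
            break;      /* nothing left to evict: done */

        /* Pass 2: drop our pin and unbind; only -EINTR aborts the loop. */
        ret = 0;
        for (i = 0; i < n; i++) {
            evict[i]->temp_pins--;
            if (ret == 0)
                ret = vma_unbind(evict[i]);
            if (ret != -EINTR)
                ret = 0; /* anything still busy is retried next pass */
        }
    } while (ret == 0);

    return ret;
}

int main(void)
{
    struct vma b = { .bound = true };
    struct vma a = { .next = &b, .bound = true };

    printf("evict_vm() = %d, a.bound = %d, b.bound = %d\n",
           evict_vm(&a), a.bound, b.bound);
    return 0;
}

The loop terminates because each completed pass either unbinds everything it collected or, once nothing evictable remains, collects an empty list and breaks; the -EINTR escape mirrors the "Get me out of here!" comment in the patch.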
@@ -135,7 +147,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) struct intel_uncore *uncore = ggtt->vm.gt->uncore; struct drm_i915_private *i915 = ggtt->vm.i915; - gen6_ggtt_invalidate(ggtt); + gen8_ggtt_invalidate(ggtt); if (INTEL_GEN(i915) >= 12) intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, @@ -522,7 +534,7 @@ void __i915_vm_close(struct i915_address_space *vm) atomic_and(~I915_VMA_PIN_MASK, &vma->flags); WARN_ON(__i915_vma_unbind(vma)); - i915_vma_destroy(vma); + __i915_vma_put(vma); i915_gem_object_put(obj); } @@ -783,7 +795,7 @@ __set_pd_entry(struct i915_page_directory * const pd, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { /* Each thread pre-pins the pd, and we may have a thread per pde. */ - GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry)); + GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry)); atomic_inc(px_used(pd)); pd->entry[idx] = to; @@ -1118,7 +1130,7 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, atomic_add(count, &pt->used); /* All other pdes may be simultaneously removed */ - GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES); + GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES); *start += count; } } while (idx++, --len); @@ -1363,12 +1375,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) * If everybody agrees to not to write into the scratch page, * we can reuse it for all vm, keeping contexts and processes separate. */ - if (vm->has_read_only && - vm->i915->kernel_context && - vm->i915->kernel_context->vm) { - struct i915_address_space *clone = - rcu_dereference_protected(vm->i915->kernel_context->vm, - true); /* static */ + if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { + struct i915_address_space *clone = vm->gt->vm; GEM_BUG_ON(!clone->has_read_only); @@ -1683,6 +1691,28 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } +static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end) +{ + struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_table *pt; + unsigned int pde; + + start = round_down(start, SZ_64K); + end = round_up(end, SZ_64K) - start; + + mutex_lock(&ppgtt->flush); + + gen6_for_each_pde(pt, pd, start, end, pde) + gen6_write_pde(ppgtt, pde, pt); + + mb(); + ioread32(ppgtt->pd_addr + pde - 1); + gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt); + mb(); + + mutex_unlock(&ppgtt->flush); +} + static int gen6_alloc_va_range(struct i915_address_space *vm, u64 start, u64 length) { @@ -1692,7 +1722,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, intel_wakeref_t wakeref; u64 from = start; unsigned int pde; - bool flush = false; int ret = 0; wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); @@ -1717,11 +1746,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, spin_lock(&pd->lock); if (pd->entry[pde] == &vm->scratch[1]) { pd->entry[pde] = pt; - if (i915_vma_is_bound(ppgtt->vma, - I915_VMA_GLOBAL_BIND)) { - gen6_write_pde(ppgtt, pde, pt); - flush = true; - } } else { alloc = pt; pt = pd->entry[pde]; @@ -1732,8 +1756,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, } spin_unlock(&pd->lock); - if (flush) - gen6_ggtt_invalidate(vm->gt->ggtt); + if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) + gen6_flush_pd(ppgtt, from, start); goto out; @@ -1788,11 +1812,12 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = 
to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - i915_vma_destroy(ppgtt->vma); + __i915_vma_put(ppgtt->vma); gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); + mutex_destroy(&ppgtt->flush); mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt->base.pd); } @@ -1817,17 +1842,11 @@ static int pd_vma_bind(struct i915_vma *vma, struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); struct gen6_ppgtt *ppgtt = vma->private; u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; - struct i915_page_table *pt; - unsigned int pde; px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; - gen6_for_all_pdes(pt, ppgtt->base.pd, pde) - gen6_write_pde(ppgtt, pde, pt); - - gen6_ggtt_invalidate(ggtt); - + gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); return 0; } @@ -1876,6 +1895,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) i915_active_init(&vma->active, NULL, NULL); + kref_init(&vma->ref); mutex_init(&vma->pages_mutex); vma->vm = i915_vm_get(&ggtt->vm); vma->ops = &pd_vma_ops; @@ -1917,9 +1937,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base) * size. We allocate at the top of the GTT to avoid fragmentation. */ if (!atomic_read(&ppgtt->pin_count)) { - err = i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); + err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH); } if (!err) atomic_inc(&ppgtt->pin_count); @@ -1958,6 +1976,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); + mutex_init(&ppgtt->flush); mutex_init(&ppgtt->pin_mutex); ppgtt_init(&ppgtt->base, &i915->gt); @@ -1994,6 +2013,7 @@ err_scratch: err_pd: kfree(ppgtt->base.pd); err_free: + mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt); return ERR_PTR(err); } @@ -2609,8 +2629,6 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; - ppgtt->vm.total = ggtt->vm.total; - return 0; err_ppgtt: @@ -2661,7 +2679,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt) static void cleanup_init_ggtt(struct i915_ggtt *ggtt) { ggtt_release_guc_top(ggtt); - drm_mm_remove_node(&ggtt->error_capture); + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_mm_remove_node(&ggtt->error_capture); } static int init_ggtt(struct i915_ggtt *ggtt) @@ -2692,13 +2711,15 @@ static int init_ggtt(struct i915_ggtt *ggtt) if (ret) return ret; - /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, - PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; + if (ggtt->mappable_end) { + /* Reserve a mappable slot for our lockless error capture */ + ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, + PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + if (ret) + return ret; + } /* * The upper portion of the GuC address space has a sizeable hole @@ -2744,59 +2765,6 @@ int i915_init_ggtt(struct drm_i915_private *i915) return 0; } -void i915_gem_cleanup_memory_regions(struct drm_i915_private *i915) -{ - int i; - - for (i = 0; i < INTEL_REGION_UNKNOWN; i++) { - struct intel_memory_region *region = i915->mm.regions[i]; - - if (region) - intel_memory_region_put(region); - } -} - -int i915_gem_init_memory_regions(struct drm_i915_private *i915) -{ - int err, i; - - for (i = 0; i < INTEL_REGION_UNKNOWN; 
i++) { - struct intel_memory_region *mem = ERR_PTR(-ENODEV); - u32 type; - - if (!HAS_REGION(i915, BIT(i))) - continue; - - type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]); - switch (type) { - case INTEL_MEMORY_SYSTEM: - mem = i915_gem_shmem_setup(i915); - break; - case INTEL_MEMORY_STOLEN: - mem = i915_gem_stolen_setup(i915); - break; - } - - if (IS_ERR(mem)) { - err = PTR_ERR(mem); - DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type); - goto out_cleanup; - } - - mem->id = intel_region_map[i]; - mem->type = type; - mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]); - - i915->mm.regions[i] = mem; - } - - return 0; - -out_cleanup: - i915_gem_cleanup_memory_regions(i915); - return err; -} - static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) { struct i915_vma *vma, *vn; @@ -2823,7 +2791,9 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) i915_address_space_fini(&ggtt->vm); arch_phys_wc_del(ggtt->mtrr); - io_mapping_fini(&ggtt->iomap); + + if (ggtt->iomap.size) + io_mapping_fini(&ggtt->iomap); } /** @@ -2834,8 +2804,6 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915) { struct pagevec *pvec; - i915_gem_cleanup_memory_regions(i915); - fini_aliasing_ppgtt(&i915->ggtt); ggtt_cleanup_hw(&i915->ggtt); @@ -2922,35 +2890,51 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return 0; } -static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv) +static void tgl_setup_private_ppat(struct intel_uncore *uncore) { /* TGL doesn't support LLC or AGE settings */ - I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC); - I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT); - I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC); - I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB); -} - -static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC); - I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); - I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT); + intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB); +} + +static void cnl_setup_private_ppat(struct intel_uncore *uncore) +{ + intel_uncore_write(uncore, + GEN10_PAT_INDEX(0), + GEN8_PPAT_WB | GEN8_PPAT_LLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(1), + GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(2), + GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(3), + GEN8_PPAT_UC); + intel_uncore_write(uncore, + 
GEN10_PAT_INDEX(4), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(5), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(6), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(7), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability * bits. When using advanced contexts each context stores its own PAT, but * writing this data shouldn't be harmful even in those cases. */ -static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) +static void bdw_setup_private_ppat(struct intel_uncore *uncore) { u64 pat; @@ -2963,11 +2947,11 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } -static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) +static void chv_setup_private_ppat(struct intel_uncore *uncore) { u64 pat; @@ -2999,8 +2983,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, CHV_PPAT_SNOOP) | GEN8_PPAT(7, CHV_PPAT_SNOOP); - I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } static void gen6_gmch_remove(struct i915_address_space *vm) @@ -3011,18 +2995,26 @@ static void gen6_gmch_remove(struct i915_address_space *vm) cleanup_scratch_page(vm); } -static void setup_private_pat(struct drm_i915_private *dev_priv) +static void setup_private_pat(struct intel_uncore *uncore) { - GEM_BUG_ON(INTEL_GEN(dev_priv) < 8); + struct drm_i915_private *i915 = uncore->i915; - if (INTEL_GEN(dev_priv) >= 12) - tgl_setup_private_ppat(dev_priv); - else if (INTEL_GEN(dev_priv) >= 10) - cnl_setup_private_ppat(dev_priv); - else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) - chv_setup_private_ppat(dev_priv); + GEM_BUG_ON(INTEL_GEN(i915) < 8); + + if (INTEL_GEN(i915) >= 12) + tgl_setup_private_ppat(uncore); + else if (INTEL_GEN(i915) >= 10) + cnl_setup_private_ppat(uncore); + else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) + chv_setup_private_ppat(uncore); else - bdw_setup_private_ppat(dev_priv); + bdw_setup_private_ppat(uncore); +} + +static struct resource pci_resource(struct pci_dev *pdev, int bar) +{ + return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) @@ -3034,10 +3026,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) int err; /* TODO: We're not aware of mappable constraints on gen8 yet */ - ggtt->gmadr = - (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - ggtt->mappable_end = resource_size(&ggtt->gmadr); + if (!IS_DGFX(dev_priv)) { + ggtt->gmadr = pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + } err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); if (!err) @@ -3069,7 +3061,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) 
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; } - ggtt->invalidate = gen6_ggtt_invalidate; + ggtt->invalidate = gen8_ggtt_invalidate; ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; @@ -3078,7 +3070,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.pte_encode = gen8_pte_encode; - setup_private_pat(dev_priv); + setup_private_pat(ggtt->vm.gt->uncore); return ggtt_probe_common(ggtt, size); } @@ -3260,14 +3252,17 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt) if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; - if (!io_mapping_init_wc(&ggtt->iomap, - ggtt->gmadr.start, - ggtt->mappable_end)) { - ggtt->vm.cleanup(&ggtt->vm); - return -EIO; - } + if (ggtt->mappable_end) { + if (!io_mapping_init_wc(&ggtt->iomap, + ggtt->gmadr.start, + ggtt->mappable_end)) { + ggtt->vm.cleanup(&ggtt->vm); + return -EIO; + } - ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); + ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, + ggtt->mappable_end); + } i915_ggtt_init_fences(ggtt); @@ -3293,15 +3288,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) if (ret) return ret; - ret = i915_gem_init_memory_regions(dev_priv); - if (ret) - goto out_gtt_cleanup; - return 0; - -out_gtt_cleanup: - dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm); - return ret; } int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) @@ -3314,7 +3301,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) { - GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate); + GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate); ggtt->invalidate = guc_ggtt_invalidate; @@ -3324,13 +3311,13 @@ void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) { /* XXX Temporary pardon for error unload */ - if (ggtt->invalidate == gen6_ggtt_invalidate) + if (ggtt->invalidate == gen8_ggtt_invalidate) return; /* We should only be called after i915_ggtt_enable_guc() */ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); - ggtt->invalidate = gen6_ggtt_invalidate; + ggtt->invalidate = gen8_ggtt_invalidate; ggtt->invalidate(ggtt); } @@ -3382,10 +3369,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) { - ggtt_restore_mappings(&i915->ggtt); + struct i915_ggtt *ggtt = &i915->ggtt; + + ggtt_restore_mappings(ggtt); if (INTEL_GEN(i915) >= 8) - setup_private_pat(i915); + setup_private_pat(ggtt->vm.gt->uncore); } static struct scatterlist * diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index f074f1de66e8..31a4a96ddd0d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -443,6 +443,7 @@ struct i915_ppgtt { struct gen6_ppgtt { struct i915_ppgtt base; + struct mutex flush; struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; @@ -575,6 +576,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); int i915_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); +static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) +{ + return ggtt->mappable_end > 0; +} + int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index ad33fbe90a28..54fce81d5724 100644 --- 
a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -2,6 +2,7 @@ * SPDX-License-Identifier: MIT */ +#include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" #include "i915_drv.h" @@ -63,7 +64,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES); break; case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); + value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN); break; case I915_PARAM_CMD_PARSER_VERSION: value = i915_cmd_parser_get_version(i915); diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c index be127cd28931..3aa213684293 100644 --- a/drivers/gpu/drm/i915/i915_globals.c +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -20,7 +20,10 @@ static LIST_HEAD(globals); static atomic_t active; static atomic_t epoch; static struct park_work { - struct rcu_work work; + struct delayed_work work; + struct rcu_head rcu; + unsigned long flags; +#define PENDING 0 int epoch; } park; @@ -37,11 +40,33 @@ static void i915_globals_shrink(void) global->shrink(); } +static void __i915_globals_grace(struct rcu_head *rcu) +{ + /* Ratelimit parking as shrinking is quite slow */ + schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ)); +} + +static void __i915_globals_queue_rcu(void) +{ + park.epoch = atomic_inc_return(&epoch); + if (!atomic_read(&active)) { + init_rcu_head(&park.rcu); + call_rcu(&park.rcu, __i915_globals_grace); + } +} + static void __i915_globals_park(struct work_struct *work) { + destroy_rcu_head(&park.rcu); + /* Confirm nothing woke up in the last grace period */ - if (park.epoch == atomic_read(&epoch)) - i915_globals_shrink(); + if (park.epoch != atomic_read(&epoch)) { + __i915_globals_queue_rcu(); + return; + } + + clear_bit(PENDING, &park.flags); + i915_globals_shrink(); } void __init i915_global_register(struct i915_global *global) @@ -85,7 +110,7 @@ int __init i915_globals_init(void) } } - INIT_RCU_WORK(&park.work, __i915_globals_park); + INIT_DELAYED_WORK(&park.work, __i915_globals_park); return 0; } @@ -103,8 +128,9 @@ void i915_globals_park(void) if (!atomic_dec_and_test(&active)) return; - park.epoch = atomic_inc_return(&epoch); - queue_rcu_work(system_wq, &park.work); + /* Queue cleanup after the next RCU grace period has freed slabs */ + if (!test_and_set_bit(PENDING, &park.flags)) + __i915_globals_queue_rcu(); } void i915_globals_unpark(void) @@ -113,12 +139,21 @@ void i915_globals_unpark(void) atomic_inc(&active); } +static void __exit __i915_globals_flush(void) +{ + atomic_inc(&active); /* skip shrinking */ + + rcu_barrier(); /* wait for the work to be queued */ + flush_delayed_work(&park.work); + + atomic_dec(&active); +} + void __exit i915_globals_exit(void) { - /* Flush any residual park_work */ - atomic_inc(&epoch); - flush_rcu_work(&park.work); + GEM_BUG_ON(atomic_read(&active)); + __i915_globals_flush(); __i915_globals_cleanup(); /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5cf4eed5add8..fda0977d2059 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -40,6 +40,7 @@ #include "display/intel_overlay.h" #include "gem/i915_gem_context.h" +#include "gem/i915_gem_lmem.h" #include "i915_drv.h" #include "i915_gpu_error.h" @@ -235,6 +236,7 @@ struct compress { struct pagevec pool; struct z_stream_s zstream; void *tmp; + bool wc; }; 
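The i915_globals.c rework above defers the slab shrink behind an RCU grace period and ratelimits it with a delayed work, using an epoch counter to detect whether the device woke up while the work was queued: the worker only shrinks if the epoch it captured at queue time is still current, otherwise it re-arms itself. Below is a rough single-file sketch of just that epoch bookkeeping, assuming direct function calls in place of call_rcu() and the workqueue (which this deliberately does not model, nor the PENDING bit); all names here are illustrative stand-ins.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active;
static atomic_int epoch;
static int queued_epoch;

static void queue_park(void)   /* stands in for call_rcu() + delayed work */
{
    queued_epoch = atomic_fetch_add(&epoch, 1) + 1;
}

static bool park_worker(void)  /* stands in for __i915_globals_park() */
{
    if (queued_epoch != atomic_load(&epoch)) {
        queue_park();          /* something woke up meanwhile: try later */
        return false;
    }
    puts("shrinking global slab caches");
    return true;
}

static void globals_park(void)
{
    if (atomic_fetch_sub(&active, 1) == 1)  /* last user went idle */
        queue_park();
}

static void globals_unpark(void)
{
    atomic_fetch_add(&epoch, 1);   /* invalidate any queued park */
    atomic_fetch_add(&active, 1);
}

int main(void)
{
    atomic_store(&active, 1);

    globals_park();
    globals_unpark();   /* activity during the "grace period" */
    park_worker();      /* epoch moved on: requeues instead of shrinking */
    globals_park();
    park_worker();      /* quiet now: the shrink finally runs */
    return 0;
}

The point of the two-step check is the same as in the patch: shrinking is slow, so it must never run while the driver is active, and a stale queued park must re-verify idleness rather than shrink on outdated information.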
static bool compress_init(struct compress *c) @@ -292,7 +294,7 @@ static int compress_page(struct compress *c, struct z_stream_s *zstream = &c->zstream; zstream->next_in = src; - if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) + if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) zstream->next_in = c->tmp; zstream->avail_in = PAGE_SIZE; @@ -367,6 +369,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) struct compress { struct pagevec pool; + bool wc; }; static bool compress_init(struct compress *c) @@ -389,7 +392,7 @@ static int compress_page(struct compress *c, if (!ptr) return -ENOMEM; - if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) + if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; @@ -534,10 +537,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, } err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); - err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n", - jiffies_to_msecs(ee->hangcheck_timestamp - epoch), - ee->hangcheck_timestamp, - ee->hangcheck_timestamp == epoch ? "; epoch" : ""); err_printf(m, " engine reset count: %u\n", ee->reset_count); for (n = 0; n < ee->num_ports; n++) { @@ -600,9 +599,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, { struct drm_printer p = i915_error_printer(m); - intel_device_info_dump_flags(info, &p); + intel_device_info_print_static(info, &p); + intel_device_info_print_runtime(runtime, &p); + intel_device_info_print_topology(&runtime->sseu, &p); intel_driver_caps_print(caps, &p); - intel_device_info_dump_topology(&runtime->sseu, &p); } static void err_print_params(struct drm_i915_error_state_buf *m, @@ -679,11 +679,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, ts = ktime_to_timespec64(error->uptime); err_printf(m, "Uptime: %lld s %ld us\n", (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); - err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ); - err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n", - error->capture, - jiffies_to_msecs(jiffies - error->capture), - jiffies_to_msecs(error->capture - error->epoch)); + err_printf(m, "Capture: %lu jiffies; %d ms ago\n", + error->capture, jiffies_to_msecs(jiffies - error->capture)); for (ee = error->engine; ee; ee = ee->next) err_printf(m, "Active process (on ring %s): %s [%d]\n", @@ -741,8 +738,21 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (IS_GEN_RANGE(m->i915, 8, 11)) err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache); + if (IS_GEN(m->i915, 12)) + err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err); + + if (INTEL_GEN(m->i915) >= 12) { + int i; + + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) + err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, + error->sfc_done[i]); + + err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done); + } + for (ee = error->engine; ee; ee = ee->next) - error_print_engine(m, ee, error->epoch); + error_print_engine(m, ee, error->capture); for (ee = error->engine; ee; ee = ee->next) { const struct drm_i915_error_object *obj; @@ -770,7 +780,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, for (j = 0; j < ee->num_requests; j++) error_print_request(m, " ", &ee->requests[j], - error->epoch); + error->capture); } print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer); @@ -970,7 +980,6 @@ i915_error_object_create(struct drm_i915_private *i915, 
struct drm_i915_error_object *dst; unsigned long num_pages; struct sgt_iter iter; - dma_addr_t dma; int ret; might_sleep(); @@ -996,17 +1005,54 @@ i915_error_object_create(struct drm_i915_private *i915, dst->page_count = 0; dst->unused = 0; + compress->wc = i915_gem_object_is_lmem(vma->obj) || + drm_mm_node_allocated(&ggtt->error_capture); + ret = -EINVAL; - for_each_sgt_daddr(dma, iter, vma->pages) { + if (drm_mm_node_allocated(&ggtt->error_capture)) { void __iomem *s; + dma_addr_t dma; - ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); + for_each_sgt_daddr(dma, iter, vma->pages) { + ggtt->vm.insert_page(&ggtt->vm, dma, slot, + I915_CACHE_NONE, 0); - s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); - ret = compress_page(compress, (void __force *)s, dst); - io_mapping_unmap(s); - if (ret) - break; + s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); + ret = compress_page(compress, (void __force *)s, dst); + io_mapping_unmap(s); + if (ret) + break; + } + } else if (i915_gem_object_is_lmem(vma->obj)) { + struct intel_memory_region *mem = vma->obj->mm.region; + dma_addr_t dma; + + for_each_sgt_daddr(dma, iter, vma->pages) { + void __iomem *s; + + s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE); + ret = compress_page(compress, (void __force *)s, dst); + io_mapping_unmap(s); + if (ret) + break; + } + } else { + struct page *page; + + for_each_sgt_page(page, iter, vma->pages) { + void *s; + + drm_clflush_pages(&page, 1); + + s = kmap(page); + ret = compress_page(compress, s, dst); + kunmap(page); + + drm_clflush_pages(&page, 1); + + if (ret) + break; + } } if (ret || compress_flush(compress, dst)) { @@ -1144,8 +1190,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error, } ee->idle = intel_engine_is_idle(engine); - if (!ee->idle) - ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, engine); @@ -1177,7 +1221,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, static void record_request(const struct i915_request *request, struct drm_i915_error_request *erq) { - const struct i915_gem_context *ctx = request->gem_context; + const struct i915_gem_context *ctx; erq->flags = request->fence.flags; erq->context = request->fence.context; @@ -1188,8 +1232,11 @@ static void record_request(const struct i915_request *request, erq->head = request->head; erq->tail = request->tail; + erq->pid = 0; rcu_read_lock(); - erq->pid = ctx->pid ? 
pid_nr(ctx->pid) : 0; + ctx = rcu_dereference(request->context->gem_context); + if (ctx) + erq->pid = pid_nr(ctx->pid); rcu_read_unlock(); } @@ -1257,25 +1304,34 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine, static bool record_context(struct drm_i915_error_context *e, const struct i915_request *rq) { - const struct i915_gem_context *ctx = rq->gem_context; + struct i915_gem_context *ctx; + struct task_struct *task; + bool capture; - if (ctx->pid) { - struct task_struct *task; + rcu_read_lock(); + ctx = rcu_dereference(rq->context->gem_context); + if (ctx && !kref_get_unless_zero(&ctx->ref)) + ctx = NULL; + rcu_read_unlock(); + if (!ctx) + return false; - rcu_read_lock(); - task = pid_task(ctx->pid, PIDTYPE_PID); - if (task) { - strcpy(e->comm, task->comm); - e->pid = task->pid; - } - rcu_read_unlock(); + rcu_read_lock(); + task = pid_task(ctx->pid, PIDTYPE_PID); + if (task) { + strcpy(e->comm, task->comm); + e->pid = task->pid; } + rcu_read_unlock(); e->sched_attr = ctx->sched; e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); - return i915_gem_context_no_error_capture(ctx); + capture = i915_gem_context_no_error_capture(ctx); + + i915_gem_context_put(ctx); + return capture; } struct capture_vma { @@ -1411,7 +1467,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress) capture = request_record_user_bo(request, ee, capture); capture = capture_vma(capture, - request->hw_context->state, + request->context->state, &ee->ctx); capture = capture_vma(capture, @@ -1563,6 +1619,18 @@ static void capture_reg_state(struct i915_gpu_state *error) if (IS_GEN_RANGE(i915, 8, 11)) error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); + if (IS_GEN(i915, 12)) + error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); + + if (INTEL_GEN(i915) >= 12) { + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { + error->sfc_done[i] = + intel_uncore_read(uncore, GEN12_SFC_DONE(i)); + } + + error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); + } + /* 4: Everything else */ if (INTEL_GEN(i915) >= 11) { error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); @@ -1657,26 +1725,15 @@ static void capture_params(struct i915_gpu_state *error) i915_params_copy(&error->params, &i915_modparams); } -static unsigned long capture_find_epoch(const struct i915_gpu_state *error) -{ - const struct drm_i915_error_engine *ee; - unsigned long epoch = error->capture; - - for (ee = error->engine; ee; ee = ee->next) { - if (ee->hangcheck_timestamp && - time_before(ee->hangcheck_timestamp, epoch)) - epoch = ee->hangcheck_timestamp; - } - - return epoch; -} - static void capture_finish(struct i915_gpu_state *error) { struct i915_ggtt *ggtt = &error->i915->ggtt; - const u64 slot = ggtt->error_capture.start; - ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + if (drm_mm_node_allocated(&ggtt->error_capture)) { + const u64 slot = ggtt->error_capture.start; + + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + } } #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) @@ -1722,8 +1779,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915) error->overlay = intel_overlay_capture_error_state(i915); error->display = intel_display_capture_error_state(i915); - error->epoch = capture_find_epoch(error); - capture_finish(error); compress_fini(&compress); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 7f1cd0b1fef7..5d2c3372ff99 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ 
b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -34,7 +34,6 @@ struct i915_gpu_state { ktime_t boottime; ktime_t uptime; unsigned long capture; - unsigned long epoch; struct drm_i915_private *i915; @@ -75,6 +74,9 @@ struct i915_gpu_state { u32 gab_ctl; u32 gfx_mode; u32 gtt_cache; + u32 aux_err; /* gen12 */ + u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */ + u32 gam_done; /* gen12 */ u32 nfence; u64 fence[I915_MAX_NUM_FENCES]; @@ -86,7 +88,6 @@ struct i915_gpu_state { /* Software tracked state */ bool idle; - unsigned long hangcheck_timestamp; int num_requests; u32 reset_count; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 572a5c37cc61..42b79f577500 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -45,6 +45,7 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" +#include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_irq.h" @@ -320,180 +321,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, } } -static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) -{ - WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); - - return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; -} - -void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_gt *gt = &dev_priv->gt; - - spin_lock_irq(>->irq_lock); - - while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM)) - ; - - dev_priv->gt_pm.rps.pm_iir = 0; - - spin_unlock_irq(>->irq_lock); -} - -void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_gt *gt = &dev_priv->gt; - - spin_lock_irq(>->irq_lock); - gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS); - dev_priv->gt_pm.rps.pm_iir = 0; - spin_unlock_irq(>->irq_lock); -} - -void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_gt *gt = &dev_priv->gt; - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - if (READ_ONCE(rps->interrupts_enabled)) - return; - - spin_lock_irq(>->irq_lock); - WARN_ON_ONCE(rps->pm_iir); - - if (INTEL_GEN(dev_priv) >= 11) - WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM)); - else - WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); - - rps->interrupts_enabled = true; - gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events); - - spin_unlock_irq(>->irq_lock); -} - -u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask) -{ - return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz; -} - -void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - struct intel_gt *gt = &dev_priv->gt; - - if (!READ_ONCE(rps->interrupts_enabled)) - return; - - spin_lock_irq(>->irq_lock); - rps->interrupts_enabled = false; - - I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); - - gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); - - spin_unlock_irq(>->irq_lock); - intel_synchronize_irq(dev_priv); - - /* Now that we will not be generating any more work, flush any - * outstanding tasks. As we are called on the RPS idle path, - * we will reset the GPU to minimum frequencies, so the current - * state of the worker can be discarded. 
- */ - cancel_work_sync(&rps->work); - if (INTEL_GEN(dev_priv) >= 11) - gen11_reset_rps_interrupts(dev_priv); - else - gen6_reset_rps_interrupts(dev_priv); -} - -void gen9_reset_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - assert_rpm_wakelock_held(gt->uncore->rpm); - - spin_lock_irq(>->irq_lock); - gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); - spin_unlock_irq(>->irq_lock); -} - -void gen9_enable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - assert_rpm_wakelock_held(gt->uncore->rpm); - - spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - WARN_ON_ONCE(intel_uncore_read(gt->uncore, - gen6_pm_iir(gt->i915)) & - gt->pm_guc_events); - guc->interrupts.enabled = true; - gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); - } - spin_unlock_irq(>->irq_lock); -} - -void gen9_disable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - assert_rpm_wakelock_held(gt->uncore->rpm); - - spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; - - gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); - - spin_unlock_irq(>->irq_lock); - intel_synchronize_irq(gt->i915); - - gen9_reset_guc_interrupts(guc); -} - -void gen11_reset_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - spin_lock_irq(>->irq_lock); - gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); - spin_unlock_irq(>->irq_lock); -} - -void gen11_enable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - - WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); - guc->interrupts.enabled = true; - } - spin_unlock_irq(>->irq_lock); -} - -void gen11_disable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; - - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); - - spin_unlock_irq(>->irq_lock); - intel_synchronize_irq(gt->i915); - - gen11_reset_guc_interrupts(guc); -} - /** * bdw_update_port_irq - update DE port interrupt * @dev_priv: driver private @@ -1065,199 +892,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) return position; } -static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 busy_up, busy_down, max_avg, min_avg; - u8 new_delay; - - spin_lock(&mchdev_lock); - - intel_uncore_write16(uncore, - MEMINTRSTS, - intel_uncore_read(uncore, MEMINTRSTS)); - - new_delay = dev_priv->ips.cur_delay; - - intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); - busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); - busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); - max_avg = intel_uncore_read(uncore, RCBMAXAVG); - min_avg = intel_uncore_read(uncore, RCBMINAVG); - - /* Handle RCS change request from hw */ - if (busy_up > max_avg) { - if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) - new_delay = dev_priv->ips.cur_delay - 1; - if (new_delay < dev_priv->ips.max_delay) - new_delay = dev_priv->ips.max_delay; - } else if (busy_down < min_avg) { - if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) - new_delay = dev_priv->ips.cur_delay + 1; - if 
(new_delay > dev_priv->ips.min_delay) - new_delay = dev_priv->ips.min_delay; - } - - if (ironlake_set_drps(dev_priv, new_delay)) - dev_priv->ips.cur_delay = new_delay; - - spin_unlock(&mchdev_lock); - - return; -} - -static void vlv_c0_read(struct drm_i915_private *dev_priv, - struct intel_rps_ei *ei) -{ - ei->ktime = ktime_get_raw(); - ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); - ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); -} - -void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) -{ - memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); -} - -static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - const struct intel_rps_ei *prev = &rps->ei; - struct intel_rps_ei now; - u32 events = 0; - - if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) - return 0; - - vlv_c0_read(dev_priv, &now); - - if (prev->ktime) { - u64 time, c0; - u32 render, media; - - time = ktime_us_delta(now.ktime, prev->ktime); - - time *= dev_priv->czclk_freq; - - /* Workload can be split between render + media, - * e.g. SwapBuffers being blitted in X after being rendered in - * mesa. To account for this we need to combine both engines - * into our activity counter. - */ - render = now.render_c0 - prev->render_c0; - media = now.media_c0 - prev->media_c0; - c0 = max(render, media); - c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ - - if (c0 > time * rps->power.up_threshold) - events = GEN6_PM_RP_UP_THRESHOLD; - else if (c0 < time * rps->power.down_threshold) - events = GEN6_PM_RP_DOWN_THRESHOLD; - } - - rps->ei = now; - return events; -} - -static void gen6_pm_rps_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, gt_pm.rps.work); - struct intel_gt *gt = &dev_priv->gt; - struct intel_rps *rps = &dev_priv->gt_pm.rps; - bool client_boost = false; - int new_delay, adj, min, max; - u32 pm_iir = 0; - - spin_lock_irq(>->irq_lock); - if (rps->interrupts_enabled) { - pm_iir = fetch_and_zero(&rps->pm_iir); - client_boost = atomic_read(&rps->num_waiters); - } - spin_unlock_irq(>->irq_lock); - - /* Make sure we didn't queue anything we're not going to process. */ - WARN_ON(pm_iir & ~dev_priv->pm_rps_events); - if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) - goto out; - - mutex_lock(&rps->lock); - - pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); - - adj = rps->last_adj; - new_delay = rps->cur_freq; - min = rps->min_freq_softlimit; - max = rps->max_freq_softlimit; - if (client_boost) - max = rps->max_freq; - if (client_boost && new_delay < rps->boost_freq) { - new_delay = rps->boost_freq; - adj = 0; - } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { - if (adj > 0) - adj *= 2; - else /* CHV needs even encode values */ - adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; - - if (new_delay >= rps->max_freq_softlimit) - adj = 0; - } else if (client_boost) { - adj = 0; - } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { - if (rps->cur_freq > rps->efficient_freq) - new_delay = rps->efficient_freq; - else if (rps->cur_freq > rps->min_freq_softlimit) - new_delay = rps->min_freq_softlimit; - adj = 0; - } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { - if (adj < 0) - adj *= 2; - else /* CHV needs even encode values */ - adj = IS_CHERRYVIEW(dev_priv) ? 
-2 : -1; - - if (new_delay <= rps->min_freq_softlimit) - adj = 0; - } else { /* unknown event */ - adj = 0; - } - - rps->last_adj = adj; - - /* - * Limit deboosting and boosting to keep ourselves at the extremes - * when in the respective power modes (i.e. slowly decrease frequencies - * while in the HIGH_POWER zone and slowly increase frequencies while - * in the LOW_POWER zone). On idle, we will hit the timeout and drop - * to the next level quickly, and conversely if busy we expect to - * hit a waitboost and rapidly switch into max power. - */ - if ((adj < 0 && rps->power.mode == HIGH_POWER) || - (adj > 0 && rps->power.mode == LOW_POWER)) - rps->last_adj = 0; - - /* sysfs frequency interfaces may have snuck in while servicing the - * interrupt - */ - new_delay += adj; - new_delay = clamp_t(int, new_delay, min, max); - - if (intel_set_rps(dev_priv, new_delay)) { - DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); - rps->last_adj = 0; - } - - mutex_unlock(&rps->lock); - -out: - /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ - spin_lock_irq(>->irq_lock); - if (rps->interrupts_enabled) - gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events); - spin_unlock_irq(>->irq_lock); -} - - /** * ivybridge_parity_work - Workqueue called when a parity error interrupt * occurred. @@ -1631,54 +1265,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, res1, res2); } -/* The RPS events need forcewake, so we add them to a work queue and mask their - * IMR bits until the work is done. Other interrupts can be processed without - * the work queue. */ -void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_rps *rps = &i915->gt_pm.rps; - const u32 events = i915->pm_rps_events & pm_iir; - - lockdep_assert_held(>->irq_lock); - - if (unlikely(!events)) - return; - - gen6_gt_pm_mask_irq(gt, events); - - if (!rps->interrupts_enabled) - return; - - rps->pm_iir |= events; - schedule_work(&rps->work); -} - -void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - struct intel_gt *gt = &dev_priv->gt; - - if (pm_iir & dev_priv->pm_rps_events) { - spin_lock(>->irq_lock); - gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events); - if (rps->interrupts_enabled) { - rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; - schedule_work(&rps->work); - } - spin_unlock(>->irq_lock); - } - - if (INTEL_GEN(dev_priv) >= 8) - return; - - if (pm_iir & PM_VEBOX_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]); - - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); -} - static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -1989,7 +1575,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) if (gt_iir) gen6_gt_irq_handler(&dev_priv->gt, gt_iir); if (pm_iir) - gen6_rps_irq_handler(dev_priv, pm_iir); + gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -2393,7 +1979,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, } if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) - ironlake_rps_change_irq_handler(dev_priv); + gen5_rps_irq_handler(&dev_priv->gt.rps); } static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, @@ -2498,7 +2084,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (pm_iir) { I915_WRITE(GEN6_PMIIR, 
pm_iir); ret = IRQ_HANDLED; - gen6_rps_irq_handler(dev_priv, pm_iir); + gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); } } @@ -2575,10 +2161,16 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) u32 mask; if (INTEL_GEN(dev_priv) >= 12) - /* TODO: Add AUX entries for USBC */ return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | - TGL_DE_PORT_AUX_DDIC; + TGL_DE_PORT_AUX_DDIC | + TGL_DE_PORT_AUX_USBC1 | + TGL_DE_PORT_AUX_USBC2 | + TGL_DE_PORT_AUX_USBC3 | + TGL_DE_PORT_AUX_USBC4 | + TGL_DE_PORT_AUX_USBC5 | + TGL_DE_PORT_AUX_USBC6; + mask = GEN8_AUX_CHANNEL_A; if (INTEL_GEN(dev_priv) >= 9) @@ -2597,7 +2189,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 9) + if (INTEL_GEN(dev_priv) >= 11) + return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; + else if (INTEL_GEN(dev_priv) >= 9) return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; else return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; @@ -2859,9 +2453,30 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); } -static irqreturn_t gen11_irq_handler(int irq, void *arg) +static void +gen11_display_irq_handler(struct drm_i915_private *i915) +{ + void __iomem * const regs = i915->uncore.regs; + const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); + + disable_rpm_wakeref_asserts(&i915->runtime_pm); + /* + * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ + * for the display related bits. + */ + raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); + gen8_de_irq_handler(i915, disp_ctl); + raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, + GEN11_DISPLAY_IRQ_ENABLE); + + enable_rpm_wakeref_asserts(&i915->runtime_pm); +} + +static __always_inline irqreturn_t +__gen11_irq_handler(struct drm_i915_private * const i915, + u32 (*intr_disable)(void __iomem * const regs), + void (*intr_enable)(void __iomem * const regs)) { - struct drm_i915_private * const i915 = arg; void __iomem * const regs = i915->uncore.regs; struct intel_gt *gt = &i915->gt; u32 master_ctl; @@ -2870,9 +2485,9 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(i915)) return IRQ_NONE; - master_ctl = gen11_master_intr_disable(regs); + master_ctl = intr_disable(regs); if (!master_ctl) { - gen11_master_intr_enable(regs); + intr_enable(regs); return IRQ_NONE; } @@ -2880,27 +2495,25 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) gen11_gt_irq_handler(gt, master_ctl); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - if (master_ctl & GEN11_DISPLAY_IRQ) { - const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); - - disable_rpm_wakeref_asserts(&i915->runtime_pm); - /* - * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ - * for the display related bits. 
- */ - gen8_de_irq_handler(i915, disp_ctl); - enable_rpm_wakeref_asserts(&i915->runtime_pm); - } + if (master_ctl & GEN11_DISPLAY_IRQ) + gen11_display_irq_handler(i915); gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); - gen11_master_intr_enable(regs); + intr_enable(regs); gen11_gu_misc_irq_handler(gt, gu_misc_iir); return IRQ_HANDLED; } +static irqreturn_t gen11_irq_handler(int irq, void *arg) +{ + return __gen11_irq_handler(arg, + gen11_master_intr_disable, + gen11_master_intr_enable); +} + /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -3185,15 +2798,11 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) ibx_irq_reset(dev_priv); } -static void gen11_irq_reset(struct drm_i915_private *dev_priv) +static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; enum pipe pipe; - gen11_master_intr_disable(dev_priv->uncore.regs); - - gen11_gt_irq_reset(&dev_priv->gt); - intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); if (INTEL_GEN(dev_priv) >= 12) { @@ -3222,13 +2831,24 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); - GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); - GEN3_IRQ_RESET(uncore, GEN8_PCU_); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); } +static void gen11_irq_reset(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + + gen11_master_intr_disable(dev_priv->uncore.regs); + + gen11_gt_irq_reset(&dev_priv->gt); + gen11_display_irq_reset(dev_priv); + + GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); + GEN3_IRQ_RESET(uncore, GEN8_PCU_); +} + void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask) { @@ -3373,6 +2993,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, hotplug_irqs = sde_ddi_mask | sde_tc_mask; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins); + I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); @@ -3478,6 +3100,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; + if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) + I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + hotplug_irqs = SDE_HOTPLUG_MASK_SPT; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); @@ -3994,7 +3619,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4099,7 +3724,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_WRITE(GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4241,10 +3866,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_WRITE(GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); if (iir & I915_BSD_USER_INTERRUPT) - 
intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); + intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4270,13 +3895,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) void intel_irq_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - struct intel_rps *rps = &dev_priv->gt_pm.rps; int i; intel_hpd_init_work(dev_priv); - INIT_WORK(&rps->work, gen6_pm_rps_work); - INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; @@ -4285,33 +3907,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; - /* Let's track the enabled rps events */ - if (IS_VALLEYVIEW(dev_priv)) - /* WaGsvRC0ResidencyMethod:vlv */ - dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; - else - dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_DOWN_TIMEOUT); - - /* We share the register with other engine */ - if (INTEL_GEN(dev_priv) > 9) - GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000); - - rps->pm_intrmsk_mbz = 0; - - /* - * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. - * - * TODO: verify if this can be reproduced on VLV,CHV. - */ - if (INTEL_GEN(dev_priv) <= 7) - rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; - - if (INTEL_GEN(dev_priv) >= 8) - rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 19a3bc019535..812c47a9c2d6 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -17,14 +17,8 @@ struct drm_device; struct drm_display_mode; struct drm_i915_private; struct intel_crtc; -struct intel_crtc; -struct intel_gt; -struct intel_guc; struct intel_uncore; -void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir); -void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); - void intel_irq_init(struct drm_i915_private *dev_priv); void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); @@ -106,12 +100,6 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 pipe_mask); -void gen9_reset_guc_interrupts(struct intel_guc *guc); -void gen9_enable_guc_interrupts(struct intel_guc *guc); -void gen9_disable_guc_interrupts(struct intel_guc *guc); -void gen11_reset_guc_interrupts(struct intel_guc *guc); -void gen11_enable_guc_interrupts(struct intel_guc *guc); -void gen11_disable_guc_interrupts(struct intel_guc *guc); bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c index 07b04b0acb77..fdd550405fd3 100644 --- a/drivers/gpu/drm/i915/i915_memcpy.c +++ b/drivers/gpu/drm/i915/i915_memcpy.c @@ -27,6 +27,12 @@ #include "i915_memcpy.h" +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) +#define CI_BUG_ON(expr) BUG_ON(expr) +#else +#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + static DEFINE_STATIC_KEY_FALSE(has_movntdqa); #ifdef 
CONFIG_AS_MOVNTDQA @@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) { kernel_fpu_begin(); - len >>= 4; while (len >= 4) { asm("movntdqa (%0), %%xmm0\n" "movntdqa 16(%0), %%xmm1\n" @@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) kernel_fpu_end(); } + +static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) +{ + kernel_fpu_begin(); + + while (len >= 4) { + asm("movntdqa (%0), %%xmm0\n" + "movntdqa 16(%0), %%xmm1\n" + "movntdqa 32(%0), %%xmm2\n" + "movntdqa 48(%0), %%xmm3\n" + "movups %%xmm0, (%1)\n" + "movups %%xmm1, 16(%1)\n" + "movups %%xmm2, 32(%1)\n" + "movups %%xmm3, 48(%1)\n" + :: "r" (src), "r" (dst) : "memory"); + src += 64; + dst += 64; + len -= 4; + } + while (len--) { + asm("movntdqa (%0), %%xmm0\n" + "movups %%xmm0, (%1)\n" + :: "r" (src), "r" (dst) : "memory"); + src += 16; + dst += 16; + } + + kernel_fpu_end(); +} +#else +static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {} +static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {} #endif /** @@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len) if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15)) return false; -#ifdef CONFIG_AS_MOVNTDQA if (static_branch_likely(&has_movntdqa)) { if (likely(len)) - __memcpy_ntdqa(dst, src, len); + __memcpy_ntdqa(dst, src, len >> 4); return true; } -#endif return false; } +/** + * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC + * @dst: destination pointer + * @src: source pointer + * @len: how many bytes to copy + * + * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from + * @src to @dst using * non-temporal instructions where available, but + * accepts that its arguments may not be aligned, but are valid for the + * potential 16-byte read past the end. + */ +void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len) +{ + unsigned long addr; + + CI_BUG_ON(!i915_has_memcpy_from_wc()); + + addr = (unsigned long)src; + if (!IS_ALIGNED(addr, 16)) { + unsigned long x = min(ALIGN(addr, 16) - addr, len); + + memcpy(dst, src, x); + + len -= x; + dst += x; + src += x; + } + + if (likely(len)) + __memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16)); +} + void i915_memcpy_init_early(struct drm_i915_private *dev_priv) { /* diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h index 970d84b16987..e36d30edd987 100644 --- a/drivers/gpu/drm/i915/i915_memcpy.h +++ b/drivers/gpu/drm/i915/i915_memcpy.h @@ -11,7 +11,9 @@ struct drm_i915_private; void i915_memcpy_init_early(struct drm_i915_private *i915); + bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); +void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len); /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment, * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 4f1806f65040..1dd1f3652795 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -166,7 +166,7 @@ i915_param_named_unsafe(enable_dp_mst, bool, 0600, "Enable multi-stream transport (MST) for new DisplayPort sinks. 
(default: true)"); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -i915_param_named_unsafe(inject_load_failure, uint, 0400, +i915_param_named_unsafe(inject_probe_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); #endif @@ -179,6 +179,11 @@ i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); #endif +#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM) +i915_param_named_unsafe(fake_lmem_start, ulong, 0600, + "Fake LMEM start offset (default: 0)"); +#endif + static __always_inline void _print_param(struct drm_printer *p, const char *name, const char *type, @@ -190,6 +195,8 @@ static __always_inline void _print_param(struct drm_printer *p, drm_printf(p, "i915.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x); + else if (!__builtin_strcmp(type, "unsigned long")) + drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x); else if (!__builtin_strcmp(type, "char *")) drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); else diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index d29ade3b7de6..31b88f297fbc 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -61,11 +61,12 @@ struct drm_printer; param(char *, dmc_firmware_path, NULL) \ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \ param(int, edp_vswing, 0) \ - param(int, reset, 2) \ - param(unsigned int, inject_load_failure, 0) \ + param(int, reset, 3) \ + param(unsigned int, inject_probe_failure, 0) \ param(int, fastboot, -1) \ param(int, enable_dpcd_backlight, 0) \ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \ + param(unsigned long, fake_lmem_start, 0) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ param(bool, enable_hangcheck, true) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index f9a3bfe68689..9571611b4b16 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -30,6 +30,7 @@ #include "display/intel_fbdev.h" #include "i915_drv.h" +#include "i915_perf.h" #include "i915_globals.h" #include "i915_selftest.h" @@ -436,7 +437,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .has_rc6 = 1, \ .has_rc6p = 1, \ .has_rps = true, \ - .ppgtt_type = INTEL_PPGTT_ALIASING, \ + .ppgtt_type = INTEL_PPGTT_FULL, \ .ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ @@ -493,7 +494,7 @@ static const struct intel_device_info intel_valleyview_info = { .has_rps = true, .display.has_gmch = 1, .display.has_hotplug = 1, - .ppgtt_type = INTEL_PPGTT_ALIASING, + .ppgtt_type = INTEL_PPGTT_FULL, .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, @@ -612,6 +613,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_logical_ring_preemption = 1, \ .display.has_csr = 1, \ .has_gt_uc = 1, \ + .display.has_hdcp = 1, \ .display.has_ipc = 1, \ .ddb_size = 896 @@ -655,6 +657,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ .display.has_fbc = 1, \ + .display.has_hdcp = 1, \ .display.has_psr = 1, \ .has_runtime_pm = 1, \ .display.has_csr = 1, \ @@ -735,6 +738,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = { 
GEN9_FEATURES, \ GEN(10), \ .ddb_size = 1024, \ + .display.has_dsc = 1, \ .has_coherent_ggtt = false, \ GLK_COLORS @@ -822,6 +826,10 @@ static const struct intel_device_info intel_tigerlake_12_info = { .has_rps = false, /* XXX disabled for debugging */ }; +#define GEN12_DGFX_FEATURES \ + GEN12_FEATURES, \ + .is_dgfx = 1 + #undef GEN #undef PLATFORM @@ -890,6 +898,8 @@ static const struct pci_device_id pciidlist[] = { INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CML_U_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CML_U_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), INTEL_EHL_IDS(&intel_elkhartlake_info), @@ -996,6 +1006,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err > 0 ? -ENOTTY : err; } + err = i915_perf_selftests(pdev); + if (err) { + i915_pci_remove(pdev); + return err > 0 ? -ENOTTY : err; + } + return 0; } @@ -1038,7 +1054,12 @@ static int __init i915_init(void) return 0; } - return pci_register_driver(&i915_pci_driver); + err = pci_register_driver(&i915_pci_driver); + if (err) + return err; + + i915_perf_sysctl_register(); + return 0; } static void __exit i915_exit(void) @@ -1046,6 +1067,7 @@ static void __exit i915_exit(void) if (!i915_pci_driver.driver.owner) return; + i915_perf_sysctl_unregister(); pci_unregister_driver(&i915_pci_driver); i915_globals_exit(); } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d2ac51fe4f04..84350c7bc711 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -200,6 +200,7 @@ #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_perf.h" @@ -217,6 +218,7 @@ #include "oa/i915_oa_cflgt3.h" #include "oa/i915_oa_cnl.h" #include "oa/i915_oa_icl.h" +#include "oa/i915_oa_tgl.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -293,6 +295,7 @@ static u32 i915_perf_stream_paranoid = true; /* On Gen8+ automatically triggered OA reports include a 'reason' field... 
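 * (a 6-bit field at bit 19 on gen8-11; gen12 widens it to 7 bits, see
 * OAREPORT_REASON_MASK_EXTENDED below)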
*/ #define OAREPORT_REASON_MASK 0x3f +#define OAREPORT_REASON_MASK_EXTENDED 0x7f #define OAREPORT_REASON_SHIFT 19 #define OAREPORT_REASON_TIMER (1<<0) #define OAREPORT_REASON_CTX_SWITCH (1<<3) @@ -338,6 +341,10 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_C4_B8] = { 7, 64 }, }; +static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = { + [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, +}; + #define SAMPLE_OA_REPORT (1<<0) /** @@ -380,6 +387,8 @@ struct i915_oa_config_bo { struct i915_vma *vma; }; +static struct ctl_table_header *sysctl_header; + static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer); void i915_oa_config_release(struct kref *ref) @@ -418,6 +427,14 @@ static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo) kfree(oa_bo); } +static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + + return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) & + GEN12_OAG_OATAILPTR_MASK; +} + static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream) { struct intel_uncore *uncore = stream->uncore; @@ -538,7 +555,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) aging_tail = hw_tail; stream->oa_buffer.aging_timestamp = now; } else { - DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n", + DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n", hw_tail); } } @@ -740,7 +757,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * it to userspace... */ reason = ((report32[0] >> OAREPORT_REASON_SHIFT) & - OAREPORT_REASON_MASK); + (IS_GEN(stream->perf->i915, 12) ? + OAREPORT_REASON_MASK_EXTENDED : + OAREPORT_REASON_MASK)); if (reason == 0) { if (__ratelimit(&stream->perf->spurious_report_rs)) DRM_NOTE("Skipping spurious, invalid OA report\n"); @@ -757,7 +776,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * Note: that we don't clear the valid_ctx_bit so userspace can * understand that the ID has been squashed by the kernel. */ - if (!(report32[0] & stream->perf->gen8_valid_ctx_bit)) + if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) && + INTEL_GEN(stream->perf->i915) <= 11) ctx_id = report32[2] = INVALID_CTX_ID; /* @@ -824,6 +844,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, } if (start_offset != *offset) { + i915_reg_t oaheadptr; + + oaheadptr = IS_GEN(stream->perf->i915, 12) ? + GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR; + spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); /* @@ -831,9 +856,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * relative to oa_buf_base so put back here... */ head += gtt_offset; - - intel_uncore_write(uncore, GEN8_OAHEADPTR, - head & GEN8_OAHEADPTR_MASK); + intel_uncore_write(uncore, oaheadptr, + head & GEN12_OAG_OAHEADPTR_MASK); stream->oa_buffer.head = head; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -869,12 +893,16 @@ static int gen8_oa_read(struct i915_perf_stream *stream, { struct intel_uncore *uncore = stream->uncore; u32 oastatus; + i915_reg_t oastatus_reg; int ret; if (WARN_ON(!stream->oa_buffer.vaddr)) return -EIO; - oastatus = intel_uncore_read(uncore, GEN8_OASTATUS); + oastatus_reg = IS_GEN(stream->perf->i915, 12) ? 
+ GEN12_OAG_OASTATUS : GEN8_OASTATUS; + + oastatus = intel_uncore_read(uncore, oastatus_reg); /* * We treat OABUFFER_OVERFLOW as a significant error: @@ -906,7 +934,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream, * Note: .oa_enable() is expected to re-init the oabuffer and * reset GEN8_OASTATUS for us */ - oastatus = intel_uncore_read(uncore, GEN8_OASTATUS); + oastatus = intel_uncore_read(uncore, oastatus_reg); } if (oastatus & GEN8_OASTATUS_REPORT_LOST) { @@ -914,7 +942,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream, DRM_I915_PERF_RECORD_OA_REPORT_LOST); if (ret) return ret; - intel_uncore_write(uncore, GEN8_OASTATUS, + intel_uncore_write(uncore, oastatus_reg, oastatus & ~GEN8_OASTATUS_REPORT_LOST); } @@ -1260,7 +1288,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) case 8: case 9: case 10: - if (USES_GUC_SUBMISSION(ce->engine->i915)) { + if (intel_engine_in_execlists_submission_mode(ce->engine)) { + stream->specific_ctx_id_mask = + (1U << GEN8_CTX_ID_WIDTH) - 1; + stream->specific_ctx_id = stream->specific_ctx_id_mask; + } else { /* * When using GuC, the context descriptor we write in * i915 is read by GuC and rewritten before it's @@ -1280,10 +1312,6 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) */ stream->specific_ctx_id_mask = (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1; - } else { - stream->specific_ctx_id_mask = - (1U << GEN8_CTX_ID_WIDTH) - 1; - stream->specific_ctx_id = stream->specific_ctx_id_mask; } break; @@ -1488,6 +1516,63 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream) stream->pollin = false; } +static void gen12_init_oa_buffer(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); + unsigned long flags; + + spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); + + intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); + intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, + gtt_offset & GEN12_OAG_OAHEADPTR_MASK); + stream->oa_buffer.head = gtt_offset; + + /* + * PRM says: + * + * "This MMIO must be set before the OATAILPTR + * register and after the OAHEADPTR register. This is + * to enable proper functionality of the overflow + * bit." + */ + intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset | + OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); + intel_uncore_write(uncore, GEN12_OAG_OATAILPTR, + gtt_offset & GEN12_OAG_OATAILPTR_MASK); + + /* Mark that we need updated tail pointers to read from... */ + stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; + stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; + + /* + * Reset state used to recognise context switches, affecting which + * reports we will forward to userspace while filtering for a single + * context. + */ + stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; + + spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); + + /* + * NB: although the OA buffer will initially be allocated + * zeroed via shmfs (and so this memset is redundant when + * first allocating), we may re-init the OA buffer, either + * when re-enabling a stream or in error/reset paths. + * + * The reason we clear the buffer for each re-init is for the + * sanity check in gen8_append_oa_reports() that looks at the + * reason field to make sure it's non-zero which relies on + * the assumption that new reports are being written to zeroed + * memory... 
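+ * (so a reason of zero can only mean stale, unwritten memory and never
+ * a report the hardware actually produced)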
+ */ + memset(stream->oa_buffer.vaddr, 0, + stream->oa_buffer.vma->size); + + stream->pollin = false; +} + static int alloc_oa_buffer(struct i915_perf_stream *stream) { struct drm_i915_gem_object *bo; @@ -1694,6 +1779,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream) *cs++ = MI_MATH_ADD; *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); + *cs++ = MI_ARB_CHECK; + /* * Transfer the result into the predicate register to be used for the * predicated jump. @@ -1787,7 +1874,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream, config_length += num_lri_dwords(oa_config->mux_regs_len); config_length += num_lri_dwords(oa_config->b_counter_regs_len); config_length += num_lri_dwords(oa_config->flex_regs_len); - config_length++; /* MI_BATCH_BUFFER_END */ + config_length += 3; /* MI_BATCH_BUFFER_START */ config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); @@ -1812,7 +1899,12 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream, oa_config->flex_regs, oa_config->flex_regs_len); - *cs++ = MI_BATCH_BUFFER_END; + /* Jump into the active wait. */ + *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ? + MI_BATCH_BUFFER_START : + MI_BATCH_BUFFER_START_GEN8); + *cs++ = i915_ggtt_offset(stream->noa_wait); + *cs++ = 0; i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); @@ -1878,7 +1970,9 @@ static int emit_oa_config(struct i915_perf_stream *stream, if (err) goto err_vma_put; + intel_engine_pm_get(ce->engine); rq = i915_request_create(ce); + intel_engine_pm_put(ce->engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_vma_unpin; @@ -2067,7 +2161,7 @@ static int gen8_modify_context(struct intel_context *ce, lockdep_assert_held(&ce->pin_mutex); - rq = i915_request_create(ce->engine->kernel_context); + rq = intel_engine_create_kernel_request(ce->engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -2128,6 +2222,53 @@ static int gen8_configure_context(struct i915_gem_context *ctx, return err; } +static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable) +{ + int err; + struct intel_context *ce = stream->pinned_ctx; + u32 format = stream->oa_buffer.format; + struct flex regs_context[] = { + { + GEN8_OACTXCONTROL, + stream->perf->ctx_oactxctrl_offset + 1, + enable ? GEN8_OA_COUNTER_RESUME : 0, + }, + }; + /* Offsets in regs_lri are not used since this configuration is only + * applied using LRI. Initialize the correct offsets for posterity. + */ +#define GEN12_OAR_OACONTROL_OFFSET 0x5B0 + struct flex regs_lri[] = { + { + GEN12_OAR_OACONTROL, + GEN12_OAR_OACONTROL_OFFSET + 1, + (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | + (enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) + }, + { + RING_CONTEXT_CONTROL(ce->engine->mmio_base), + CTX_CONTEXT_CONTROL, + _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, + enable ? + GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : + 0) + }, + }; + + /* Modify the context image of pinned context with regs_context*/ + err = intel_context_lock_pinned(ce); + if (err) + return err; + + err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)); + intel_context_unlock_pinned(ce); + if (err) + return err; + + /* Apply regs_lri using LRI with pinned context */ + return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri)); +} + /* * Manages updating the per-context aspects of the OA stream * configuration across all contexts. 
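The struct flex tables used throughout this file pair an MMIO register with its dword offset in the context image and the value to program: applied through the pinned context image they rewrite that image, while applied via LRI they reduce to MI_LOAD_REGISTER_IMM (offset, value) pairs, along the lines of the driver's gen8_load_flex(). A minimal sketch of the LRI form follows; emit_flex_lri is a hypothetical name, not a function in this file:

	/*
	 * Illustrative sketch only: emit one LRI header, then a
	 * (register offset, value) pair per flex table entry.
	 * count must be non-zero.
	 */
	static u32 *emit_flex_lri(u32 *cs, const struct flex *flex, int count)
	{
		*cs++ = MI_LOAD_REGISTER_IMM(count);
		do {
			*cs++ = i915_mmio_reg_offset(flex->reg);
			*cs++ = flex->value;
		} while (flex++, --count);

		return cs;
	}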
@@ -2151,41 +2292,16 @@ static int gen8_configure_context(struct i915_gem_context *ctx, * per-context OA state. * * Note: it's only the RCS/Render context that has any OA state. + * Note: the first flex register passed must always be R_PWR_CLK_STATE */ -static int gen8_configure_all_contexts(struct i915_perf_stream *stream, - const struct i915_oa_config *oa_config) +static int oa_configure_all_contexts(struct i915_perf_stream *stream, + struct flex *regs, + size_t num_regs) { struct drm_i915_private *i915 = stream->perf->i915; - /* The MMIO offsets for Flex EU registers aren't contiguous */ - const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; -#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) - struct flex regs[] = { - { - GEN8_R_PWR_CLK_STATE, - CTX_R_PWR_CLK_STATE, - }, - { - GEN8_OACTXCONTROL, - stream->perf->ctx_oactxctrl_offset + 1, - ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME) - }, - { EU_PERF_CNTL0, ctx_flexeuN(0) }, - { EU_PERF_CNTL1, ctx_flexeuN(1) }, - { EU_PERF_CNTL2, ctx_flexeuN(2) }, - { EU_PERF_CNTL3, ctx_flexeuN(3) }, - { EU_PERF_CNTL4, ctx_flexeuN(4) }, - { EU_PERF_CNTL5, ctx_flexeuN(5) }, - { EU_PERF_CNTL6, ctx_flexeuN(6) }, - }; -#undef ctx_flexeuN struct intel_engine_cs *engine; struct i915_gem_context *ctx, *cn; - int i, err; - - for (i = 2; i < ARRAY_SIZE(regs); i++) - regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); + int err; lockdep_assert_held(&stream->perf->lock); @@ -2207,15 +2323,12 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, */ spin_lock(&i915->gem.contexts.lock); list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { - if (ctx == i915->kernel_context) - continue; - if (!kref_get_unless_zero(&ctx->ref)) continue; spin_unlock(&i915->gem.contexts.lock); - err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs)); + err = gen8_configure_context(ctx, regs, num_regs); if (err) { i915_gem_context_put(ctx); return err; @@ -2240,7 +2353,7 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); - err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs)); + err = gen8_modify_self(ce, regs, num_regs); if (err) return err; } @@ -2248,6 +2361,56 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, return 0; } +static int gen12_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config) +{ + struct flex regs[] = { + { + GEN8_R_PWR_CLK_STATE, + CTX_R_PWR_CLK_STATE, + }, + }; + + return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); +} + +static int lrc_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config) +{ + /* The MMIO offsets for Flex EU registers aren't contiguous */ + const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; +#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) + struct flex regs[] = { + { + GEN8_R_PWR_CLK_STATE, + CTX_R_PWR_CLK_STATE, + }, + { + GEN8_OACTXCONTROL, + stream->perf->ctx_oactxctrl_offset + 1, + }, + { EU_PERF_CNTL0, ctx_flexeuN(0) }, + { EU_PERF_CNTL1, ctx_flexeuN(1) }, + { EU_PERF_CNTL2, ctx_flexeuN(2) }, + { EU_PERF_CNTL3, ctx_flexeuN(3) }, + { EU_PERF_CNTL4, ctx_flexeuN(4) }, + { EU_PERF_CNTL5, ctx_flexeuN(5) }, + { EU_PERF_CNTL6, ctx_flexeuN(6) }, + }; +#undef ctx_flexeuN + int i; + + regs[1].value = + (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME; + + for (i = 2; i < ARRAY_SIZE(regs); i++) + regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); + + return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs)); +} + static int gen8_enable_metric_set(struct i915_perf_stream *stream) { struct intel_uncore *uncore = stream->uncore; @@ -2288,10 +2451,64 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) * to make sure all slices/subslices are ON before writing to NOA * registers. */ - ret = gen8_configure_all_contexts(stream, oa_config); + ret = lrc_configure_all_contexts(stream, oa_config); + if (ret) + return ret; + + return emit_oa_config(stream, oa_config, oa_context(stream)); +} + +static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) +{ + return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS, + (stream->sample_flags & SAMPLE_OA_REPORT) ? + 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); +} + +static int gen12_enable_metric_set(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + struct i915_oa_config *oa_config = stream->oa_config; + bool periodic = stream->periodic; + u32 period_exponent = stream->period_exponent; + int ret; + + intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG, + /* Disable clk ratio reports, like previous Gens. */ + _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | + GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) | + /* + * If the user didn't require OA reports, instruct + * the hardware not to emit ctx switch reports. + */ + oag_report_ctx_switches(stream)); + + intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ? + (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME | + GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE | + (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT)) + : 0); + + /* + * Update all contexts prior writing the mux configurations as we need + * to make sure all slices/subslices are ON before writing to NOA + * registers. + */ + ret = gen12_configure_all_contexts(stream, oa_config); if (ret) return ret; + /* + * For Gen12, performance counters are context + * saved/restored. Only enable it for the context that + * requested this. + */ + if (stream->ctx) { + ret = gen12_configure_oar_context(stream, true); + if (ret) + return ret; + } + return emit_oa_config(stream, oa_config, oa_context(stream)); } @@ -2300,7 +2517,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream) struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - gen8_configure_all_contexts(stream, NULL); + lrc_configure_all_contexts(stream, NULL); intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); } @@ -2310,7 +2527,22 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream) struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - gen8_configure_all_contexts(stream, NULL); + lrc_configure_all_contexts(stream, NULL); + + /* Make sure we disable noa to save power. */ + intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); +} + +static void gen12_disable_metric_set(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + + /* Reset all contexts' slices/subslices configurations. */ + gen12_configure_all_contexts(stream, NULL); + + /* disable the context save/restore or OAR counters */ + if (stream->ctx) + gen12_configure_oar_context(stream, false); /* Make sure we disable noa to save power. 
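 * (done by clearing GEN10_GT_NOA_ENABLE in RPM_CONFIG1 below)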
*/ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); @@ -2372,6 +2604,25 @@ static void gen8_oa_enable(struct i915_perf_stream *stream) GEN8_OA_COUNTER_ENABLE); } +static void gen12_oa_enable(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + u32 report_format = stream->oa_buffer.format; + + /* + * If we don't want OA reports from the OA buffer, then we don't even + * need to program the OAG unit. + */ + if (!(stream->sample_flags & SAMPLE_OA_REPORT)) + return; + + gen12_init_oa_buffer(stream); + + intel_uncore_write(uncore, GEN12_OAG_OACONTROL, + (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | + GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); +} + /** * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream * @stream: An i915 perf stream opened for OA metrics @@ -2413,6 +2664,18 @@ static void gen8_oa_disable(struct i915_perf_stream *stream) DRM_ERROR("wait for OA to be disabled timed out\n"); } +static void gen12_oa_disable(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + + intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); + if (intel_wait_for_register(uncore, + GEN12_OAG_OACONTROL, + GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, + 50)) + DRM_ERROR("wait for OA to be disabled timed out\n"); +} + /** * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream * @stream: An i915 perf stream opened for OA metrics @@ -2479,7 +2742,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -EINVAL; } - if (!(props->sample_flags & SAMPLE_OA_REPORT)) { + if (!(props->sample_flags & SAMPLE_OA_REPORT) && + (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) { DRM_DEBUG("Only OA report sampling supported\n"); return -EINVAL; } @@ -2511,7 +2775,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, format_size = perf->oa_formats[props->oa_format].size; - stream->sample_flags |= SAMPLE_OA_REPORT; + stream->sample_flags = props->sample_flags; stream->sample_size += format_size; stream->oa_buffer.format_size = format_size; @@ -2614,14 +2878,17 @@ void i915_oa_init_reg_state(const struct intel_context *ce, { struct i915_perf_stream *stream; - /* perf.exclusive_stream serialised by gen8_configure_all_contexts() */ - lockdep_assert_held(&ce->pin_mutex); + /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ if (engine->class != RENDER_CLASS) return; stream = engine->i915->perf.exclusive_stream; - if (stream) + /* + * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller + * is already doing that, so nothing to be done for gen12 here. 
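+ * (per-context OA on gen12 is instead programmed through
+ * gen12_configure_oar_context())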
+ */ + if (stream && INTEL_GEN(stream->perf->i915) < 12) gen8_update_reg_state_unlocked(ce, stream); } @@ -2842,7 +3109,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream) stream->ops->enable(stream); if (stream->hold_preemption) - i915_gem_context_set_nopreempt(stream->ctx); + intel_context_set_nopreempt(stream->pinned_ctx); } /** @@ -2868,7 +3135,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream) stream->enabled = false; if (stream->hold_preemption) - i915_gem_context_clear_nopreempt(stream->ctx); + intel_context_clear_nopreempt(stream->pinned_ctx); if (stream->ops->disable) stream->ops->disable(stream); @@ -3079,32 +3346,40 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf, } } - if (props->hold_preemption) { - if (!props->single_context) { - DRM_DEBUG("preemption disable with no context\n"); - ret = -EINVAL; - goto err; - } - privileged_op = true; - } - /* * On Haswell the OA unit supports clock gating off for a specific * context and in this mode there's no visibility of metrics for the * rest of the system, which we consider acceptable for a * non-privileged client. * - * For Gen8+ the OA unit no longer supports clock gating off for a + * For Gen8->11 the OA unit no longer supports clock gating off for a * specific context and the kernel can't securely stop the counters * from updating as system-wide / global values. Even though we can * filter reports based on the included context ID we can't block * clients from seeing the raw / global counter values via * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to * enable the OA unit by default. + * + * For Gen12+ we gain a new OAR unit that only monitors the RCS on a + * per context basis. So we can relax requirements there if the user + * doesn't request global stream access (i.e. query based sampling + * using MI_RECORD_PERF_COUNT. 
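+ * The gen12 check below therefore waives the privileged-op requirement
+ * only when a specific context is given and SAMPLE_OA_REPORT is not set.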
*/ - if (IS_HASWELL(perf->i915) && specific_ctx && !props->hold_preemption) + if (IS_HASWELL(perf->i915) && specific_ctx) + privileged_op = false; + else if (IS_GEN(perf->i915, 12) && specific_ctx && + (props->sample_flags & SAMPLE_OA_REPORT) == 0) privileged_op = false; + if (props->hold_preemption) { + if (!props->single_context) { + DRM_DEBUG("preemption disable with no context\n"); + ret = -EINVAL; + goto err; + } + privileged_op = true; + } + /* Similar to perf's kernel.perf_paranoid_cpu sysctl option * we check a dev.i915.perf_stream_paranoid sysctl option * to determine if it's ok to access system wide OA counters @@ -3418,7 +3693,9 @@ void i915_perf_register(struct drm_i915_private *i915) sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr); - if (INTEL_GEN(i915) >= 11) { + if (IS_TIGERLAKE(i915)) { + i915_perf_load_test_config_tgl(i915); + } else if (INTEL_GEN(i915) >= 11) { i915_perf_load_test_config_icl(i915); } else if (IS_CANNONLAKE(i915)) { i915_perf_load_test_config_cnl(i915); @@ -3515,56 +3792,80 @@ static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) return false; } +#define ADDR_IN_RANGE(addr, start, end) \ + ((addr) >= (start) && \ + (addr) <= (end)) + +#define REG_IN_RANGE(addr, start, end) \ + ((addr) >= i915_mmio_reg_offset(start) && \ + (addr) <= i915_mmio_reg_offset(end)) + +#define REG_EQUAL(addr, mmio) \ + ((addr) == i915_mmio_reg_offset(mmio)) + static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) && - addr <= i915_mmio_reg_offset(OASTARTTRIG8)) || - (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) && - addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) || - (addr >= i915_mmio_reg_offset(OACEC0_0) && - addr <= i915_mmio_reg_offset(OACEC7_1)); + return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) || + REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) || + REG_IN_RANGE(addr, OACEC0_0, OACEC7_1); } static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) || - (addr >= i915_mmio_reg_offset(MICRO_BP0_0) && - addr <= i915_mmio_reg_offset(NOA_WRITE)) || - (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) && - addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) || - (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) && - addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI)); + return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) || + REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) || + REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) || + REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI); } static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { return gen7_is_valid_mux_addr(perf, addr) || - addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) || - (addr >= i915_mmio_reg_offset(RPM_CONFIG0) && - addr <= i915_mmio_reg_offset(NOA_CONFIG(8))); + REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || + REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8)); } static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { return gen8_is_valid_mux_addr(perf, addr) || - addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) || - (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) && - addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI)); + REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || + REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI); } static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { return gen7_is_valid_mux_addr(perf, addr) || - (addr >= 0x25100 && addr <= 0x2FF90) || - (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) && - addr <= 
i915_mmio_reg_offset(HSW_MBVID2_NOA9)) || - addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0); + ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) || + REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) || + REG_EQUAL(addr, HSW_MBVID2_MISR0); } static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { return gen7_is_valid_mux_addr(perf, addr) || - (addr >= 0x182300 && addr <= 0x1823A4); + ADDR_IN_RANGE(addr, 0x182300, 0x1823A4); +} + +static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) +{ + return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) || + REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) || + REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) || + REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) || + REG_EQUAL(addr, GEN12_OAA_DBG_REG) || + REG_EQUAL(addr, GEN12_OAG_OA_PESS) || + REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF); +} + +static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) +{ + return REG_EQUAL(addr, NOA_WRITE) || + REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || + REG_EQUAL(addr, GDT_CHICKEN_BITS) || + REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || + REG_EQUAL(addr, RPM_CONFIG0) || + REG_EQUAL(addr, RPM_CONFIG1) || + REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8)); } static u32 mask_reg_value(u32 reg, u32 val) @@ -3573,14 +3874,14 @@ static u32 mask_reg_value(u32 reg, u32 val) * WaDisableSTUnitPowerOptimization workaround. Make sure the value * programmed by userspace doesn't change this. */ - if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg) + if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2)) val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function * indicated by its name and a bunch of selection fields used by OA * configs. */ - if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg) + if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT)) val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); return val; @@ -3688,7 +3989,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, struct i915_perf *perf = &to_i915(dev)->perf; struct drm_i915_perf_oa_config *args = data; struct i915_oa_config *oa_config, *tmp; - static struct i915_oa_reg *regs; + struct i915_oa_reg *regs; int err, id; if (!perf->i915) { @@ -3926,7 +4227,7 @@ static struct ctl_table dev_root[] = { }; /** - * i915_perf_init - initialize i915-perf state on module load + * i915_perf_init - initialize i915-perf state on module bind * @i915: i915 device instance * * Initializes i915-perf state without exposing anything to userspace. @@ -3959,14 +4260,11 @@ void i915_perf_init(struct drm_i915_private *i915) * worth the complexity to maintain now that BDW+ enable * execlist mode by default. 
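 * (the branches below wire up the gen8/9, gen10/11 and gen12 variants
 * of the OA ops)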
*/ - perf->oa_formats = gen8_plus_oa_formats; - - perf->ops.oa_enable = gen8_oa_enable; - perf->ops.oa_disable = gen8_oa_disable; perf->ops.read = gen8_oa_read; - perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; if (IS_GEN_RANGE(i915, 8, 9)) { + perf->oa_formats = gen8_plus_oa_formats; + perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; perf->ops.is_valid_mux_reg = @@ -3979,8 +4277,11 @@ void i915_perf_init(struct drm_i915_private *i915) chv_is_valid_mux_addr; } + perf->ops.oa_enable = gen8_oa_enable; + perf->ops.oa_disable = gen8_oa_disable; perf->ops.enable_metric_set = gen8_enable_metric_set; perf->ops.disable_metric_set = gen8_disable_metric_set; + perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; if (IS_GEN(i915, 8)) { perf->ctx_oactxctrl_offset = 0x120; @@ -3994,6 +4295,8 @@ void i915_perf_init(struct drm_i915_private *i915) perf->gen8_valid_ctx_bit = BIT(16); } } else if (IS_GEN_RANGE(i915, 10, 11)) { + perf->oa_formats = gen8_plus_oa_formats; + perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; perf->ops.is_valid_mux_reg = @@ -4001,8 +4304,11 @@ void i915_perf_init(struct drm_i915_private *i915) perf->ops.is_valid_flex_reg = gen8_is_valid_flex_addr; + perf->ops.oa_enable = gen8_oa_enable; + perf->ops.oa_disable = gen8_oa_disable; perf->ops.enable_metric_set = gen8_enable_metric_set; perf->ops.disable_metric_set = gen10_disable_metric_set; + perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; if (IS_GEN(i915, 10)) { perf->ctx_oactxctrl_offset = 0x128; @@ -4012,6 +4318,24 @@ void i915_perf_init(struct drm_i915_private *i915) perf->ctx_flexeu0_offset = 0x78e; } perf->gen8_valid_ctx_bit = BIT(16); + } else if (IS_GEN(i915, 12)) { + perf->oa_formats = gen12_oa_formats; + + perf->ops.is_valid_b_counter_reg = + gen12_is_valid_b_counter_addr; + perf->ops.is_valid_mux_reg = + gen12_is_valid_mux_addr; + perf->ops.is_valid_flex_reg = + gen8_is_valid_flex_addr; + + perf->ops.oa_enable = gen12_oa_enable; + perf->ops.oa_disable = gen12_oa_disable; + perf->ops.enable_metric_set = gen12_enable_metric_set; + perf->ops.disable_metric_set = gen12_disable_metric_set; + perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read; + + perf->ctx_flexeu0_offset = 0; + perf->ctx_oactxctrl_offset = 0x144; } } @@ -4020,7 +4344,6 @@ void i915_perf_init(struct drm_i915_private *i915) oa_sample_rate_hard_limit = 1000 * (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2); - perf->sysctl_header = register_sysctl_table(dev_root); mutex_init(&perf->metrics_lock); idr_init(&perf->metrics_idr); @@ -4056,6 +4379,16 @@ static int destroy_config(int id, void *p, void *data) return 0; } +void i915_perf_sysctl_register(void) +{ + sysctl_header = register_sysctl_table(dev_root); +} + +void i915_perf_sysctl_unregister(void) +{ + unregister_sysctl_table(sysctl_header); +} + /** * i915_perf_fini - Counter part to i915_perf_init() * @i915: i915 device instance @@ -4070,8 +4403,6 @@ void i915_perf_fini(struct drm_i915_private *i915) idr_for_each(&perf->metrics_idr, destroy_config, perf); idr_destroy(&perf->metrics_idr); - unregister_sysctl_table(perf->sysctl_header); - memset(&perf->ops, 0, sizeof(perf->ops)); perf->i915 = NULL; } diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h index 4ceebce72060..882fdd0a7680 100644 --- a/drivers/gpu/drm/i915/i915_perf.h +++ b/drivers/gpu/drm/i915/i915_perf.h @@ -23,6 +23,8 @@ void i915_perf_fini(struct drm_i915_private *i915); void i915_perf_register(struct drm_i915_private *i915); void i915_perf_unregister(struct drm_i915_private *i915); 
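/*
 * Note that the sysctl pair below has module lifetime rather than device
 * lifetime: i915_init() registers the dev.i915.* table once after the PCI
 * driver is registered, and i915_exit() removes it, independent of how
 * many devices bind in between.
 */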
int i915_perf_ioctl_version(void); +void i915_perf_sysctl_register(void); +void i915_perf_sysctl_unregister(void); int i915_perf_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index a1f733fc905a..45e581455f5d 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -199,14 +199,43 @@ struct i915_perf_stream { * @pinned_ctx: The OA context specific information. */ struct intel_context *pinned_ctx; + + /** + * @specific_ctx_id: The id of the specific context. + */ u32 specific_ctx_id; + + /** + * @specific_ctx_id_mask: The mask used to masking specific_ctx_id bits. + */ u32 specific_ctx_id_mask; + /** + * @poll_check_timer: High resolution timer that will periodically + * check for data in the circular OA buffer for notifying userspace + * (e.g. during a read() or poll()). + */ struct hrtimer poll_check_timer; + + /** + * @poll_wq: The wait queue that hrtimer callback wakes when it + * sees data ready to read in the circular OA buffer. + */ wait_queue_head_t poll_wq; + + /** + * @pollin: Whether there is data available to read. + */ bool pollin; + /** + * @periodic: Whether periodic sampling is currently enabled. + */ bool periodic; + + /** + * @period_exponent: The OA unit sampling frequency is derived from this. + */ int period_exponent; /** @@ -276,7 +305,7 @@ struct i915_perf_stream { } oa_buffer; /** - * A batch buffer doing a wait on the GPU for the NOA logic to be + * @noa_wait: A batch buffer doing a wait on the GPU for the NOA logic to be * reprogrammed. */ struct i915_vma *noa_wait; @@ -351,7 +380,6 @@ struct i915_perf { struct drm_i915_private *i915; struct kobject *metrics_kobj; - struct ctl_table_header *sysctl_header; /* * Lock associated with adding/modifying/removing OA configs diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 85912917c062..f3ef6700a5f2 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -12,6 +12,7 @@ #include "gt/intel_engine_user.h" #include "gt/intel_gt_pm.h" #include "gt/intel_rc6.h" +#include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_pmu.h" @@ -143,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt) return ktime_to_ns(ktime_sub(ktime_get(), kt)); } -static u64 __pmu_estimate_rc6(struct i915_pmu *pmu) -{ - u64 val; - - /* - * We think we are runtime suspended. - * - * Report the delta from when the device was suspended to now, - * on top of the last known real value, as the approximated RC6 - * counter value. - */ - val = ktime_since(pmu->sleep_last); - val += pmu->sample[__I915_SAMPLE_RC6].cur; - - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; - - return val; -} - -static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val) -{ - /* - * If we are coming back from being runtime suspended we must - * be careful not to report a larger value than returned - * previously. 
- */ - if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; - pmu->sample[__I915_SAMPLE_RC6].cur = val; - } else { - val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } - - return val; -} - static u64 get_rc6(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; struct i915_pmu *pmu = &i915->pmu; unsigned long flags; + bool awake = false; u64 val; - val = 0; if (intel_gt_pm_get_if_awake(gt)) { val = __get_rc6(gt); - intel_gt_pm_put(gt); + intel_gt_pm_put_async(gt); + awake = true; } spin_lock_irqsave(&pmu->lock, flags); - if (val) - val = __pmu_update_rc6(pmu, val); + if (awake) { + pmu->sample[__I915_SAMPLE_RC6].cur = val; + } else { + /* + * We think we are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. + */ + val = ktime_since(pmu->sleep_last); + val += pmu->sample[__I915_SAMPLE_RC6].cur; + } + + if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) + val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; else - val = __pmu_estimate_rc6(pmu); + pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; spin_unlock_irqrestore(&pmu->lock, flags); @@ -209,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915) struct i915_pmu *pmu = &i915->pmu; if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) - __pmu_update_rc6(pmu, __get_rc6(&i915->gt)); + pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); pmu->sleep_last = ktime_get(); } -static void unpark_rc6(struct drm_i915_private *i915) -{ - struct i915_pmu *pmu = &i915->pmu; - - /* Estimate how long we slept and accumulate that into rc6 counters */ - if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) - __pmu_estimate_rc6(pmu); -} - #else static u64 get_rc6(struct intel_gt *gt) @@ -231,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt) } static void park_rc6(struct drm_i915_private *i915) {} -static void unpark_rc6(struct drm_i915_private *i915) {} #endif @@ -280,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) */ __i915_pmu_maybe_start_timer(pmu); - unpark_rc6(i915); - spin_unlock_irq(&pmu->lock); } @@ -291,6 +259,16 @@ add_sample(struct i915_pmu_sample *sample, u32 val) sample->cur += val; } +static bool exclusive_mmio_access(const struct drm_i915_private *i915) +{ + /* + * We have to avoid concurrent mmio cache line access on gen7 or + * risk a machine hang. For a fun history lesson dig out the old + * userspace intel_gpu_top and run it on Ivybridge or Haswell! 
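+ * (both are gen7, hence the IS_GEN() check below)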
+ */ + return IS_GEN(i915, 7); +} + static void engines_sample(struct intel_gt *gt, unsigned int period_ns) { @@ -301,8 +279,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; + if (!intel_gt_pm_is_awake(gt)) + return; + for_each_engine(engine, gt, id) { struct intel_engine_pmu *pmu = &engine->pmu; + spinlock_t *mmio_lock; unsigned long flags; bool busy; u32 val; @@ -310,7 +292,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) if (!intel_engine_pm_get_if_awake(engine)) continue; - spin_lock_irqsave(&engine->uncore->lock, flags); + mmio_lock = NULL; + if (exclusive_mmio_access(i915)) + mmio_lock = &engine->uncore->lock; + + if (unlikely(mmio_lock)) + spin_lock_irqsave(mmio_lock, flags); val = ENGINE_READ_FW(engine, RING_CTL); if (val == 0) /* powerwell off => engine idle */ @@ -341,8 +328,9 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); skip: - spin_unlock_irqrestore(&engine->uncore->lock, flags); - intel_engine_pm_put(engine); + if (unlikely(mmio_lock)) + spin_unlock_irqrestore(mmio_lock, flags); + intel_engine_pm_put_async(engine); } } @@ -352,33 +340,57 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) sample->cur += mul_u32_u32(val, mul); } +static bool frequency_sampling_enabled(struct i915_pmu *pmu) +{ + return pmu->enable & + (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) | + config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)); +} + static void frequency_sample(struct intel_gt *gt, unsigned int period_ns) { struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; struct i915_pmu *pmu = &i915->pmu; + struct intel_rps *rps = >->rps; + + if (!frequency_sampling_enabled(pmu)) + return; + + /* Report 0/0 (actual/requested) frequency while parked. */ + if (!intel_gt_pm_get_if_awake(gt)) + return; if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { u32 val; - val = i915->gt_pm.rps.cur_freq; - if (intel_gt_pm_get_if_awake(gt)) { - val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1); - val = intel_get_cagf(i915, val); - intel_gt_pm_put(gt); - } + /* + * We take a quick peek here without using forcewake + * so that we don't perturb the system under observation + * (forcewake => !rc6 => increased power use). We expect + * that if the read fails because it is outside of the + * mmio power well, then it will return 0 -- in which + * case we assume the system is running at the intended + * frequency. Fortunately, the read should rarely fail! 
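+ * (a zero read is handled below by falling back to rps->cur_freq
+ * instead of converting the RPSTAT1 value via intel_rps_get_cagf())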
+ */ + val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1); + if (val) + val = intel_rps_get_cagf(rps, val); + else + val = rps->cur_freq; add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], - intel_gpu_freq(i915, val), - period_ns / 1000); + intel_gpu_freq(rps, val), period_ns / 1000); } if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], - intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq), + intel_gpu_freq(rps, rps->cur_freq), period_ns / 1000); } + + intel_gt_pm_put_async(gt); } static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) @@ -876,8 +888,8 @@ create_event_attributes(struct i915_pmu *pmu) const char *name; const char *unit; } events[] = { - __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"), - __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"), + __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"), + __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), __event(I915_PMU_INTERRUPTS, "interrupts", NULL), __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), }; @@ -1101,20 +1113,6 @@ void i915_pmu_register(struct drm_i915_private *i915) return; } - i915_pmu_events_attr_group.attrs = create_event_attributes(pmu); - if (!i915_pmu_events_attr_group.attrs) - goto err; - - pmu->base.attr_groups = i915_pmu_attr_groups; - pmu->base.task_ctx_nr = perf_invalid_context; - pmu->base.event_init = i915_pmu_event_init; - pmu->base.add = i915_pmu_event_add; - pmu->base.del = i915_pmu_event_del; - pmu->base.start = i915_pmu_event_start; - pmu->base.stop = i915_pmu_event_stop; - pmu->base.read = i915_pmu_event_read; - pmu->base.event_idx = i915_pmu_event_event_idx; - spin_lock_init(&pmu->lock); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; @@ -1128,9 +1126,23 @@ void i915_pmu_register(struct drm_i915_private *i915) if (!pmu->name) goto err; + i915_pmu_events_attr_group.attrs = create_event_attributes(pmu); + if (!i915_pmu_events_attr_group.attrs) + goto err_name; + + pmu->base.attr_groups = i915_pmu_attr_groups; + pmu->base.task_ctx_nr = perf_invalid_context; + pmu->base.event_init = i915_pmu_event_init; + pmu->base.add = i915_pmu_event_add; + pmu->base.del = i915_pmu_event_del; + pmu->base.start = i915_pmu_event_start; + pmu->base.stop = i915_pmu_event_stop; + pmu->base.read = i915_pmu_event_read; + pmu->base.event_idx = i915_pmu_event_event_idx; + ret = perf_pmu_register(&pmu->base, pmu->name, -1); if (ret) - goto err_name; + goto err_attr; ret = i915_pmu_register_cpuhp_state(pmu); if (ret) @@ -1140,13 +1152,14 @@ void i915_pmu_register(struct drm_i915_private *i915) err_unreg: perf_pmu_unregister(&pmu->base); +err_attr: + pmu->base.event_init = NULL; + free_event_attributes(pmu); err_name: if (!is_igp(i915)) kfree(pmu->name); err: - pmu->base.event_init = NULL; - free_event_attributes(pmu); - DRM_NOTE("Failed to register PMU! 
(err=%d)\n", ret); + dev_notice(i915->drm.dev, "Failed to register PMU!\n"); } void i915_pmu_unregister(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index bf52e3983631..6c1647c5daf2 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -18,7 +18,7 @@ enum { __I915_SAMPLE_FREQ_ACT = 0, __I915_SAMPLE_FREQ_REQ, __I915_SAMPLE_RC6, - __I915_SAMPLE_RC6_ESTIMATED, + __I915_SAMPLE_RC6_LAST_REPORTED, __I915_NUM_PMU_SAMPLERS }; diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h index 21037a2e2038..732aad148881 100644 --- a/drivers/gpu/drm/i915/i915_priolist_types.h +++ b/drivers/gpu/drm/i915/i915_priolist_types.h @@ -16,6 +16,12 @@ enum { I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + + /* A preemptive pulse used to monitor the health of each engine */ + I915_PRIORITY_HEARTBEAT, + + /* Interactive workload, scheduled for immediate pageflipping */ + I915_PRIORITY_DISPLAY, }; #define I915_USER_PRIORITY_SHIFT 2 @@ -39,6 +45,7 @@ enum { * active request. */ #define I915_PRIORITY_UNPREEMPTABLE INT_MAX +#define I915_PRIORITY_BARRIER INT_MAX #define __NO_PREEMPTION (I915_PRIORITY_WAIT) diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index c27cfef9281c..ef25ce6e395e 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -103,15 +103,18 @@ query_engine_info(struct drm_i915_private *i915, struct drm_i915_engine_info __user *info_ptr; struct drm_i915_query_engine_info query; struct drm_i915_engine_info info = { }; + unsigned int num_uabi_engines = 0; struct intel_engine_cs *engine; int len, ret; if (query_item->flags) return -EINVAL; + for_each_uabi_engine(engine, i915) + num_uabi_engines++; + len = sizeof(struct drm_i915_query_engine_info) + - RUNTIME_INFO(i915)->num_engines * - sizeof(struct drm_i915_engine_info); + num_uabi_engines * sizeof(struct drm_i915_engine_info); ret = copy_query_item(&query, sizeof(query), len, query_item); if (ret != 0) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 855db888516c..bbfedeb00b7f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -413,6 +413,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014) #define GEN11_VECS_SFC_USAGE_BIT (1 << 0) +#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100) +#define GEN12_SFC_DONE_MAX 4 + #define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) #define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) #define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) @@ -471,6 +474,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOCHK_PPGTT_WT_HSW (0x2 << 3) #define ECOCHK_PPGTT_WB_HSW (0x3 << 3) +#define GEN8_RC6_CTX_INFO _MMIO(0x8504) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1 << 13) #define ECOBITS_PPGTT_CACHE64B (3 << 8) @@ -557,6 +562,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define BCS_SWCTRL _MMIO(0x22200) +/* There are 16 GPR registers */ +#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) +#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) + #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) #define HS_INVOCATION_COUNT _MMIO(0x2300) @@ -684,6 +693,45 @@ 
static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OABUFFER_SIZE_8M (6 << 3) #define OABUFFER_SIZE_16M (7 << 3) +/* Gen12 OAR unit */ +#define GEN12_OAR_OACONTROL _MMIO(0x2960) +#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 +#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0) + +#define GEN12_OACTXCONTROL _MMIO(0x2360) +#define GEN12_OAR_OASTATUS _MMIO(0x2968) + +/* Gen12 OAG unit */ +#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00) +#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0 +#define GEN12_OAG_OATAILPTR _MMIO(0xdb04) +#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0 + +#define GEN12_OAG_OABUFFER _MMIO(0xdb08) +#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7) +#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3) +#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */ + +#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28) +#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2 +#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1) +#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0) + +#define GEN12_OAG_OACONTROL _MMIO(0xdaf4) +#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2 +#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0) + +#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8) +#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) +#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) +#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) +#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) + +#define GEN12_OAG_OASTATUS _MMIO(0xdafc) +#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2) +#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1) +#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0) + /* * Flexible, Aggregate EU Counter Registers. * Note: these aren't contiguous @@ -920,6 +968,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24 #define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28 +/* Same layout as OASTARTTRIGX */ +#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900) +#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904) +#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908) +#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c) +#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910) +#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914) +#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918) +#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c) + +/* Same layout as OAREPORTTRIGX */ +#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920) +#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924) +#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928) +#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c) +#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930) +#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934) +#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938) +#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c) + /* CECX_0 */ #define OACEC_COMPARE_LESS_OR_EQUAL 6 #define OACEC_COMPARE_NOT_EQUAL 5 @@ -936,6 +1004,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OACEC_SELECT_PREV (1 << 19) #define OACEC_SELECT_BOOLEAN (2 << 19) +/* 11-bit array 0: pass-through, 1: negated */ +#define GEN12_OASCEC_NEGATE_MASK 0x7ff +#define GEN12_OASCEC_NEGATE_SHIFT 21 + /* CECX_1 */ #define OACEC_MASK_MASK 0xffff #define OACEC_CONSIDERATIONS_MASK 0xffff @@ -958,6 +1030,42 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OACEC7_0 _MMIO(0x27a8) #define OACEC7_1 _MMIO(0x27ac) +/* Same layout as CECX_Y */ +#define GEN12_OAG_CEC0_0 _MMIO(0xd940) +#define GEN12_OAG_CEC0_1 _MMIO(0xd944) +#define GEN12_OAG_CEC1_0 _MMIO(0xd948) +#define GEN12_OAG_CEC1_1 _MMIO(0xd94c) +#define GEN12_OAG_CEC2_0 _MMIO(0xd950) +#define 
GEN12_OAG_CEC2_1 _MMIO(0xd954) +#define GEN12_OAG_CEC3_0 _MMIO(0xd958) +#define GEN12_OAG_CEC3_1 _MMIO(0xd95c) +#define GEN12_OAG_CEC4_0 _MMIO(0xd960) +#define GEN12_OAG_CEC4_1 _MMIO(0xd964) +#define GEN12_OAG_CEC5_0 _MMIO(0xd968) +#define GEN12_OAG_CEC5_1 _MMIO(0xd96c) +#define GEN12_OAG_CEC6_0 _MMIO(0xd970) +#define GEN12_OAG_CEC6_1 _MMIO(0xd974) +#define GEN12_OAG_CEC7_0 _MMIO(0xd978) +#define GEN12_OAG_CEC7_1 _MMIO(0xd97c) + +/* Same layout as CECX_Y + negate 11-bit array */ +#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00) +#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04) +#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08) +#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c) +#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10) +#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14) +#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18) +#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c) +#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20) +#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24) +#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28) +#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c) +#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30) +#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34) +#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38) +#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c) + /* OA perf counters */ #define OA_PERFCNT1_LO _MMIO(0x91B8) #define OA_PERFCNT1_HI _MMIO(0x91BC) @@ -1038,6 +1146,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838) #define MICRO_BP_FIRED_ARMED _MMIO(0x983C) +#define GEN12_OAA_DBG_REG _MMIO(0xdc44) +#define GEN12_OAG_OA_PESS _MMIO(0x2b2c) +#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40) + #define GDT_CHICKEN_BITS _MMIO(0x9840) #define GT_NOA_ENABLE 0x00000080 @@ -2455,6 +2567,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) #define RING_FAULT_VALID (1 << 0) #define DONE_REG _MMIO(0x40b0) +#define GEN12_GAM_DONE _MMIO(0xcf68) #define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) #define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) @@ -2490,6 +2603,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) +#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) #define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ #define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) #define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) @@ -2602,6 +2716,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FAULT_VA_HIGH_BITS (0xf << 0) #define FAULT_GTT_SEL (1 << 4) +#define GEN12_AUX_ERR_DBG _MMIO(0x43f4) + #define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM (1 << 31) @@ -4926,14 +5042,20 @@ enum { #define BLM_PCH_POLARITY (1 << 29) #define BLC_PWM_PCH_CTL2 _MMIO(0xc8254) -#define UTIL_PIN_CTL _MMIO(0x48400) -#define UTIL_PIN_ENABLE (1 << 31) - -#define UTIL_PIN_PIPE(x) ((x) << 29) -#define UTIL_PIN_PIPE_MASK (3 << 29) -#define UTIL_PIN_MODE_PWM (1 << 24) -#define UTIL_PIN_MODE_MASK (0xf << 24) -#define UTIL_PIN_POLARITY (1 << 22) +#define UTIL_PIN_CTL _MMIO(0x48400) +#define UTIL_PIN_ENABLE (1 << 31) +#define UTIL_PIN_PIPE_MASK (3 << 29) +#define UTIL_PIN_PIPE(x) ((x) << 29) +#define UTIL_PIN_MODE_MASK (0xf << 24) +#define UTIL_PIN_MODE_DATA (0 << 24) +#define UTIL_PIN_MODE_PWM (1 << 24) +#define UTIL_PIN_MODE_VBLANK (4 << 24) +#define UTIL_PIN_MODE_VSYNC (5 << 24) +#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24) +#define UTIL_PIN_OUTPUT_DATA (1 << 23) +#define UTIL_PIN_POLARITY (1 << 22) +#define 
UTIL_PIN_DIRECTION_INPUT (1 << 19) +#define UTIL_PIN_INPUT_DATA (1 << 16) /* BXT backlight register definition. */ #define _BXT_BLC_PWM_CTL1 0xC8250 @@ -5535,45 +5657,9 @@ enum { */ #define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) #define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) -#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018) -#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c) -#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020) -#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024) #define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) #define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) -#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118) -#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c) -#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120) -#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124) - -#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210) -#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214) -#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218) -#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c) -#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220) -#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224) - -#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310) -#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314) -#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318) -#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c) -#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320) -#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324) - -#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410) -#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414) -#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418) -#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c) -#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420) -#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424) - -#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510) -#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514) -#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518) -#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c) -#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520) -#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524) #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ @@ -5671,7 +5757,8 @@ enum { #define PIPECONF_DOUBLE_WIDE (1 << 30) #define I965_PIPECONF_ACTIVE (1 << 30) #define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */ -#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) +#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) /* pre-hsw */ +#define PIPECONF_FRAME_START_DELAY(x) ((x) << 27) /* pre-hsw: 0-3 */ #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1 << 25) @@ -6279,6 +6366,7 @@ enum { #define DISPPLANE_RGBX101010 (0x8 << 26) #define DISPPLANE_RGBA101010 (0x9 << 26) #define DISPPLANE_BGRX101010 (0xa << 26) +#define DISPPLANE_BGRA101010 (0xb << 26) #define DISPPLANE_RGBX161616 (0xc << 26) #define DISPPLANE_RGBX888 (0xe << 26) #define DISPPLANE_RGBA888 
(0xf << 26) @@ -6553,12 +6641,15 @@ enum { #define SP_ENABLE (1 << 31) #define SP_GAMMA_ENABLE (1 << 30) #define SP_PIXFORMAT_MASK (0xf << 26) -#define SP_FORMAT_YUV422 (0 << 26) -#define SP_FORMAT_BGR565 (5 << 26) -#define SP_FORMAT_BGRX8888 (6 << 26) -#define SP_FORMAT_BGRA8888 (7 << 26) -#define SP_FORMAT_RGBX1010102 (8 << 26) -#define SP_FORMAT_RGBA1010102 (9 << 26) +#define SP_FORMAT_YUV422 (0x0 << 26) +#define SP_FORMAT_8BPP (0x2 << 26) +#define SP_FORMAT_BGR565 (0x5 << 26) +#define SP_FORMAT_BGRX8888 (0x6 << 26) +#define SP_FORMAT_BGRA8888 (0x7 << 26) +#define SP_FORMAT_RGBX1010102 (0x8 << 26) +#define SP_FORMAT_RGBA1010102 (0x9 << 26) +#define SP_FORMAT_BGRX1010102 (0xa << 26) /* CHV pipe B */ +#define SP_FORMAT_BGRA1010102 (0xb << 26) /* CHV pipe B */ #define SP_FORMAT_RGBX8888 (0xe << 26) #define SP_FORMAT_RGBA8888 (0xf << 26) #define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ @@ -6709,6 +6800,7 @@ enum { #define PLANE_CTL_YUV422_VYUY (3 << 16) #define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) +#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */ #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ #define PLANE_CTL_TILED_MASK (0x7 << 10) #define PLANE_CTL_TILED_LINEAR (0 << 10) @@ -7279,6 +7371,10 @@ enum { #define DMC_DEBUG3 _MMIO(0x101090) +/* Display Internal Timeout Register */ +#define RM_TIMEOUT _MMIO(0x42060) +#define MMIO_TIMEOUT_US(us) ((us) << 0) + /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) #define DE_SPRITEB_FLIP_DONE (1 << 29) @@ -7390,6 +7486,9 @@ enum { #define GEN8_PIPE_VSYNC (1 << 1) #define GEN8_PIPE_VBLANK (1 << 0) #define GEN9_PIPE_CURSOR_FAULT (1 << 11) +#define GEN11_PIPE_PLANE7_FAULT (1 << 22) +#define GEN11_PIPE_PLANE6_FAULT (1 << 21) +#define GEN11_PIPE_PLANE5_FAULT (1 << 20) #define GEN9_PIPE_PLANE4_FAULT (1 << 10) #define GEN9_PIPE_PLANE3_FAULT (1 << 9) #define GEN9_PIPE_PLANE2_FAULT (1 << 8) @@ -7409,16 +7508,25 @@ enum { GEN9_PIPE_PLANE3_FAULT | \ GEN9_PIPE_PLANE2_FAULT | \ GEN9_PIPE_PLANE1_FAULT) +#define GEN11_DE_PIPE_IRQ_FAULT_ERRORS \ + (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \ + GEN11_PIPE_PLANE7_FAULT | \ + GEN11_PIPE_PLANE6_FAULT | \ + GEN11_PIPE_PLANE5_FAULT) #define GEN8_DE_PORT_ISR _MMIO(0x44440) #define GEN8_DE_PORT_IMR _MMIO(0x44444) #define GEN8_DE_PORT_IIR _MMIO(0x44448) #define GEN8_DE_PORT_IER _MMIO(0x4444c) +#define DSI1_NON_TE (1 << 31) +#define DSI0_NON_TE (1 << 30) #define ICL_AUX_CHANNEL_E (1 << 29) #define CNL_AUX_CHANNEL_F (1 << 28) #define GEN9_AUX_CHANNEL_D (1 << 27) #define GEN9_AUX_CHANNEL_C (1 << 26) #define GEN9_AUX_CHANNEL_B (1 << 25) +#define DSI1_TE (1 << 24) +#define DSI0_TE (1 << 23) #define BXT_DE_PORT_HP_DDIC (1 << 5) #define BXT_DE_PORT_HP_DDIB (1 << 4) #define BXT_DE_PORT_HP_DDIA (1 << 3) @@ -7428,6 +7536,12 @@ enum { #define GEN8_PORT_DP_A_HOTPLUG (1 << 3) #define BXT_DE_PORT_GMBUS (1 << 1) #define GEN8_AUX_CHANNEL_A (1 << 0) +#define TGL_DE_PORT_AUX_USBC6 (1 << 13) +#define TGL_DE_PORT_AUX_USBC5 (1 << 12) +#define TGL_DE_PORT_AUX_USBC4 (1 << 11) +#define TGL_DE_PORT_AUX_USBC3 (1 << 10) +#define TGL_DE_PORT_AUX_USBC2 (1 << 9) +#define TGL_DE_PORT_AUX_USBC1 (1 << 8) #define TGL_DE_PORT_AUX_DDIC (1 << 2) #define TGL_DE_PORT_AUX_DDIB (1 << 1) #define TGL_DE_PORT_AUX_DDIA (1 << 0) @@ -7616,10 +7730,19 @@ enum { #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) -#define CHICKEN_TRANS_A _MMIO(0x420c0) -#define CHICKEN_TRANS_B _MMIO(0x420c4) -#define 
CHICKEN_TRANS_C _MMIO(0x420c8) -#define CHICKEN_TRANS_EDP _MMIO(0x420cc) +#define _CHICKEN_TRANS_A 0x420c0 +#define _CHICKEN_TRANS_B 0x420c4 +#define _CHICKEN_TRANS_C 0x420c8 +#define _CHICKEN_TRANS_EDP 0x420cc +#define _CHICKEN_TRANS_D 0x420d8 +#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \ + [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \ + [TRANSCODER_A] = _CHICKEN_TRANS_A, \ + [TRANSCODER_B] = _CHICKEN_TRANS_B, \ + [TRANSCODER_C] = _CHICKEN_TRANS_C, \ + [TRANSCODER_D] = _CHICKEN_TRANS_D)) +#define HSW_FRAME_START_DELAY_MASK (3 << 27) +#define HSW_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */ #define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ #define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) #define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) @@ -7643,6 +7766,14 @@ enum { #define GEN7_MSG_CTL _MMIO(0x45010) #define WAIT_FOR_PCH_RESET_ACK (1 << 1) #define WAIT_FOR_PCH_FLR_ACK (1 << 0) + +#define BW_BUDDY1_CTL _MMIO(0x45140) +#define BW_BUDDY2_CTL _MMIO(0x45150) +#define BW_BUDDY_DISABLE REG_BIT(31) + +#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144) +#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154) + #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) #define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) @@ -7652,15 +7783,19 @@ enum { #define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7) #define SKL_DFSM _MMIO(0x51000) -#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) -#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) -#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) -#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) -#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) -#define SKL_DFSM_PIPE_A_DISABLE (1 << 30) -#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) -#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) -#define TGL_DFSM_PIPE_D_DISABLE (1 << 22) +#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27) +#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25) +#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) +#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) +#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) +#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) +#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) +#define ICL_DFSM_DMC_DISABLE (1 << 23) +#define SKL_DFSM_PIPE_A_DISABLE (1 << 30) +#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) +#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) +#define TGL_DFSM_PIPE_D_DISABLE (1 << 22) +#define CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7) #define SKL_DSSM _MMIO(0x51004) #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) @@ -7796,6 +7931,10 @@ enum { #define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) #define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define FF_MODE2 _MMIO(0x6604) +#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16) +#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4) + /* PCH */ #define PCH_DISPLAY_BASE 0xc0000u @@ -7984,6 +8123,10 @@ enum { #define SHOTPLUG_CTL_TC _MMIO(0xc4034) #define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) + +#define SHPD_FILTER_CNT _MMIO(0xc4038) +#define SHPD_FILTER_CNT_500_ADJ 0x001D9 + /* Icelake DSC Rate Control Range Parameter Registers */ #define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) #define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) @@ -8334,10 +8477,8 @@ enum { #define TRANS_STATE_MASK (1 << 30) #define TRANS_STATE_DISABLE (0 << 30) #define TRANS_STATE_ENABLE (1 << 30) -#define TRANS_FSYNC_DELAY_HB1 (0 << 27) -#define TRANS_FSYNC_DELAY_HB2 (1 << 27) -#define TRANS_FSYNC_DELAY_HB3 (2 << 27) -#define TRANS_FSYNC_DELAY_HB4 (3 << 27) +#define TRANS_FRAME_START_DELAY_MASK (3 << 27) /* ibx */ +#define TRANS_FRAME_START_DELAY(x) ((x) << 27) /* ibx: 0-3 */ #define TRANS_INTERLACE_MASK (7 << 21) #define TRANS_PROGRESSIVE 
(0 << 21) #define TRANS_INTERLACED (3 << 21) @@ -8358,6 +8499,7 @@ enum { #define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31) #define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29) #define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27) +#define TRANS_CHICKEN2_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */ #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26) #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25) @@ -9296,11 +9438,9 @@ enum skl_power_gate { #define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) #define _ICL_AUX_ANAOVRD1_A 0x162398 #define _ICL_AUX_ANAOVRD1_B 0x6C398 -#define _TGL_AUX_ANAOVRD1_C 0x160398 #define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ _ICL_AUX_ANAOVRD1_A, \ - _ICL_AUX_ANAOVRD1_B, \ - _TGL_AUX_ANAOVRD1_C)) + _ICL_AUX_ANAOVRD1_B)) #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) @@ -9562,6 +9702,10 @@ enum skl_power_gate { #define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12) #define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12) +#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12) +#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10) +#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \ + REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans) #define TRANS_DDI_HDCP_SIGNALLING (1 << 9) #define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8) #define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7) @@ -10249,6 +10393,12 @@ enum skl_power_gate { _DKL_PHY2_BASE) + \ _DKL_TX_FW_CALIB) +#define _DKL_TX_PMD_LANE_SUS 0xD00 +#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_PMD_LANE_SUS) + #define _DKL_TX_DW17 0xDC4 #define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \ _DKL_PHY1_BASE, \ @@ -10658,6 +10808,57 @@ enum skl_power_gate { #define ICL_ESC_CLK_DIV_SHIFT 0 #define DSI_MAX_ESC_CLK 20000 /* in KHz */ +#define _DSI_CMD_FRMCTL_0 0x6b034 +#define _DSI_CMD_FRMCTL_1 0x6b834 +#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \ + _DSI_CMD_FRMCTL_0,\ + _DSI_CMD_FRMCTL_1) +#define DSI_FRAME_UPDATE_REQUEST (1 << 31) +#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29) +#define DSI_NULL_PACKET_ENABLE (1 << 28) +#define DSI_FRAME_IN_PROGRESS (1 << 0) + +#define _DSI_INTR_MASK_REG_0 0x6b070 +#define _DSI_INTR_MASK_REG_1 0x6b870 +#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \ + _DSI_INTR_MASK_REG_0,\ + _DSI_INTR_MASK_REG_1) + +#define _DSI_INTR_IDENT_REG_0 0x6b074 +#define _DSI_INTR_IDENT_REG_1 0x6b874 +#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \ + _DSI_INTR_IDENT_REG_0,\ + _DSI_INTR_IDENT_REG_1) +#define DSI_TE_EVENT (1 << 31) +#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30) +#define DSI_TX_DATA (1 << 29) +#define DSI_ULPS_ENTRY_DONE (1 << 28) +#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27) +#define DSI_HOST_CHKSUM_ERROR (1 << 26) +#define DSI_HOST_MULTI_ECC_ERROR (1 << 25) +#define DSI_HOST_SINGL_ECC_ERROR (1 << 24) +#define DSI_HOST_CONTENTION_DETECTED (1 << 23) +#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22) +#define DSI_HOST_TIMEOUT_ERROR (1 << 21) +#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20) +#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19) +#define DSI_FRAME_UPDATE_DONE (1 << 16) +#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15) +#define DSI_INVALID_TX_LENGTH (1 << 13) +#define DSI_INVALID_VC (1 << 12) +#define DSI_INVALID_DATA_TYPE (1 << 11) +#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10) +#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9) +#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8) +#define 
DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7) +#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6) +#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5) +#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4) +#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3) +#define DSI_EOT_SYNC_ERROR (1 << 2) +#define DSI_SOT_SYNC_ERROR (1 << 1) +#define DSI_SOT_ERROR (1 << 0) + /* Gen4+ Timestamp and Pipe Frame time stamp registers */ #define GEN4_TIMESTAMP _MMIO(0x2358) #define ILK_TIMESTAMP_HI _MMIO(0x70070) @@ -11262,6 +11463,7 @@ enum skl_power_gate { #define CMD_MODE_TE_GATE (0x1 << 28) #define VIDEO_MODE_SYNC_EVENT (0x2 << 28) #define VIDEO_MODE_SYNC_PULSE (0x3 << 28) +#define TE_SOURCE_GPIO (1 << 27) #define LINK_READY (1 << 20) #define PIX_FMT_MASK (0x3 << 16) #define PIX_FMT_SHIFT 16 @@ -11514,13 +11716,18 @@ enum skl_power_gate { /* MOCS (Memory Object Control State) registers */ #define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ -#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */ -#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */ -#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */ -#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */ -#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */ -/* Media decoder 2 MOCS registers */ -#define GEN11_MFX2_MOCS(i) _MMIO(0x10000 + (i) * 4) +#define __GEN9_RCS0_MOCS0 0xc800 +#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4) +#define __GEN9_VCS0_MOCS0 0xc900 +#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4) +#define __GEN9_VCS1_MOCS0 0xca00 +#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4) +#define __GEN9_VECS0_MOCS0 0xcb00 +#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4) +#define __GEN9_BCS0_MOCS0 0xcc00 +#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4) +#define __GEN11_VCS2_MOCS0 0x10000 +#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4) #define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0) #define PMFLUSHDONE_LNICRSDROP (1 << 20) @@ -11876,7 +12083,7 @@ enum skl_power_gate { /* This register controls the Display State Buffer (DSB) engines. */ #define _DSBSL_INSTANCE_BASE 0x70B00 #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ - (pipe) * 0x1000 + (id) * 100) + (pipe) * 0x1000 + (id) * 0x100) #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 4575f368455d..44a0d1a950c5 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -31,6 +31,8 @@ #include "gem/i915_gem_context.h" #include "gt/intel_context.h" +#include "gt/intel_ring.h" +#include "gt/intel_rps.h" #include "i915_active.h" #include "i915_drv.h" @@ -55,11 +57,13 @@ static struct i915_global_request { static const char *i915_fence_get_driver_name(struct dma_fence *fence) { - return "i915"; + return dev_name(to_request(fence)->i915->drm.dev); } static const char *i915_fence_get_timeline_name(struct dma_fence *fence) { + const struct i915_gem_context *ctx; + /* * The timeline struct (as part of the ppgtt underneath a context) * may be freed when the request is no longer in use by the GPU. 
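The i915_request.c hunks that follow move the request's GEM context behind an RCU-managed pointer, so i915_fence_get_timeline_name() must tolerate the context disappearing underneath it: the name is looked up under RCU and a static "[i915]" string is returned once the context is gone. A minimal standalone sketch of that lookup-with-fallback pattern; the types and names here (gem_ctx, request, request_print_timeline_name) are illustrative reductions, not the real i915 ones:

#include <linux/rcupdate.h>
#include <linux/printk.h>

struct gem_ctx {
	const char *name;
};

struct request {
	struct gem_ctx __rcu *gem_context;
};

/*
 * The context may be freed at any point once the request is no longer
 * in use by the GPU, so the name is only dereferenced inside the RCU
 * read-side critical section, with a static fallback once it is gone.
 */
static void request_print_timeline_name(struct request *rq)
{
	struct gem_ctx *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(rq->gem_context);
	pr_info("timeline: %s\n", ctx ? ctx->name : "[i915]");
	rcu_read_unlock();
}
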
@@ -72,7 +76,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence) if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return "signaled"; - return to_request(fence)->gem_context->name ?: "[i915]"; + ctx = i915_request_gem_context(to_request(fence)); + if (!ctx) + return "[" DRIVER_NAME "]"; + + return ctx->name; } static bool i915_fence_signaled(struct dma_fence *fence) @@ -186,7 +194,7 @@ static void free_capture_list(struct i915_request *request) { struct i915_capture_list *capture; - capture = request->capture_list; + capture = fetch_and_zero(&request->capture_list); while (capture) { struct i915_capture_list *next = capture->next; @@ -212,7 +220,7 @@ static void remove_from_engine(struct i915_request *rq) spin_lock(&engine->active.lock); locked = engine; } - list_del(&rq->sched.link); + list_del_init(&rq->sched.link); spin_unlock_irq(&locked->active.lock); } @@ -221,10 +229,7 @@ bool i915_request_retire(struct i915_request *rq) if (!i915_request_completed(rq)) return false; - GEM_TRACE("%s fence %llx:%lld, current %d\n", - rq->engine->name, - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq)); + RQ_TRACE(rq, "\n"); GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); trace_i915_request_retire(rq); @@ -257,8 +262,8 @@ bool i915_request_retire(struct i915_request *rq) if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) i915_request_cancel_breadcrumb(rq); if (i915_request_has_waitboost(rq)) { - GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); - atomic_dec(&rq->i915->gt_pm.rps.num_waiters); + GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters)); + atomic_dec(&rq->engine->gt->rps.num_waiters); } if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); @@ -270,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq) remove_from_client(rq); list_del(&rq->link); - intel_context_exit(rq->hw_context); - intel_context_unpin(rq->hw_context); + intel_context_exit(rq->context); + intel_context_unpin(rq->context); free_capture_list(rq); i915_sched_node_fini(&rq->sched); @@ -285,10 +290,7 @@ void i915_request_retire_upto(struct i915_request *rq) struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_request *tmp; - GEM_TRACE("%s fence %llx:%lld, current %d\n", - rq->engine->name, - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq)); + RQ_TRACE(rq, "\n"); GEM_BUG_ON(!i915_request_completed(rq)); @@ -298,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq) } static int -__i915_request_await_execution(struct i915_request *rq, - struct i915_request *signal, - void (*hook)(struct i915_request *rq, - struct dma_fence *signal), - gfp_t gfp) +__await_execution(struct i915_request *rq, + struct i915_request *signal, + void (*hook)(struct i915_request *rq, + struct dma_fence *signal), + gfp_t gfp) { struct execute_cb *cb; @@ -339,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq, } spin_unlock_irq(&signal->lock); + /* Copy across semaphore status as we need the same behaviour */ + rq->sched.flags |= signal->sched.flags; return 0; } @@ -347,10 +351,7 @@ bool __i915_request_submit(struct i915_request *request) struct intel_engine_cs *engine = request->engine; bool result = false; - GEM_TRACE("%s fence %llx:%lld, current %d\n", - engine->name, - request->fence.context, request->fence.seqno, - hwsp_seqno(request)); + RQ_TRACE(request, "\n"); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->active.lock); @@ -374,7 +375,7 @@ bool 
__i915_request_submit(struct i915_request *request) if (i915_request_completed(request)) goto xfer; - if (i915_gem_context_is_banned(request->gem_context)) + if (intel_context_is_banned(request->context)) i915_request_skip(request, -EIO); /* @@ -413,7 +414,7 @@ xfer: /* We may be recursing from the signal callback of another i915 fence */ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) && !i915_request_enable_breadcrumb(request)) - intel_engine_queue_breadcrumbs(engine); + intel_engine_signal_breadcrumbs(engine); __notify_execute_cb(request); @@ -439,10 +440,7 @@ void __i915_request_unsubmit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - GEM_TRACE("%s fence %llx:%lld, current %d\n", - engine->name, - request->fence.context, request->fence.seqno, - hwsp_seqno(request)); + RQ_TRACE(request, "\n"); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->active.lock); @@ -584,6 +582,21 @@ out: return kmem_cache_alloc(global.slab_requests, gfp); } +static void __i915_request_ctor(void *arg) +{ + struct i915_request *rq = arg; + + spin_lock_init(&rq->lock); + i915_sched_node_init(&rq->sched); + i915_sw_fence_init(&rq->submit, submit_notify); + i915_sw_fence_init(&rq->semaphore, semaphore_notify); + + rq->file_priv = NULL; + rq->capture_list = NULL; + + INIT_LIST_HEAD(&rq->execute_cb); +} + struct i915_request * __i915_request_create(struct intel_context *ce, gfp_t gfp) { @@ -641,35 +654,32 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) goto err_free; rq->i915 = ce->engine->i915; - rq->hw_context = ce; - rq->gem_context = ce->gem_context; + rq->context = ce; rq->engine = ce->engine; rq->ring = ce->ring; rq->execution_mask = ce->engine->mask; + rq->flags = 0; - rcu_assign_pointer(rq->timeline, tl); + RCU_INIT_POINTER(rq->timeline, tl); + RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); rq->hwsp_seqno = tl->hwsp_seqno; - rq->hwsp_cacheline = tl->hwsp_cacheline; rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ - spin_lock_init(&rq->lock); dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, tl->fence_context, seqno); /* We bump the ref for the fence chain */ - i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); - i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify); + i915_sw_fence_reinit(&i915_request_get(rq)->submit); + i915_sw_fence_reinit(&i915_request_get(rq)->semaphore); - i915_sched_node_init(&rq->sched); + i915_sched_node_reinit(&rq->sched); - /* No zalloc, must clear what we need by hand */ - rq->file_priv = NULL; + /* No zalloc, everything must be cleared after use */ rq->batch = NULL; - rq->capture_list = NULL; - rq->flags = 0; - - INIT_LIST_HEAD(&rq->execute_cb); + GEM_BUG_ON(rq->file_priv); + GEM_BUG_ON(rq->capture_list); + GEM_BUG_ON(!list_empty(&rq->execute_cb)); /* * Reserve space in the ring buffer for all the commands required to @@ -751,34 +761,37 @@ err_unlock: static int i915_request_await_start(struct i915_request *rq, struct i915_request *signal) { - struct intel_timeline *tl; struct dma_fence *fence; int err; GEM_BUG_ON(i915_request_timeline(rq) == rcu_access_pointer(signal->timeline)); + fence = NULL; rcu_read_lock(); - tl = rcu_dereference(signal->timeline); - if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref)) - tl = NULL; - rcu_read_unlock(); - if (!tl) /* already started or maybe even completed */ - return 0; + spin_lock_irq(&signal->lock); + if 
(!i915_request_started(signal) && + !list_is_first(&signal->link, + &rcu_dereference(signal->timeline)->requests)) { + struct i915_request *prev = list_prev_entry(signal, link); - fence = ERR_PTR(-EBUSY); - if (mutex_trylock(&tl->mutex)) { - fence = NULL; - if (!i915_request_started(signal) && - !list_is_first(&signal->link, &tl->requests)) { - signal = list_prev_entry(signal, link); - fence = dma_fence_get(&signal->fence); + /* + * Peek at the request before us in the timeline. That + * request will only be valid before it is retired, so + * after acquiring a reference to it, confirm that it is + * still part of the signaler's timeline. + */ + if (i915_request_get_rcu(prev)) { + if (list_next_entry(prev, link) == signal) + fence = &prev->fence; + else + i915_request_put(prev); } - mutex_unlock(&tl->mutex); } - intel_timeline_put(tl); - if (IS_ERR_OR_NULL(fence)) - return PTR_ERR_OR_ZERO(fence); + spin_unlock_irq(&signal->lock); + rcu_read_unlock(); + if (!fence) + return 0; err = 0; if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) @@ -809,31 +822,21 @@ already_busywaiting(struct i915_request *rq) } static int -emit_semaphore_wait(struct i915_request *to, - struct i915_request *from, - gfp_t gfp) +__emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + u32 seqno) { const int has_token = INTEL_GEN(to->i915) >= 12; u32 hwsp_offset; - int len; + int len, err; u32 *cs; GEM_BUG_ON(INTEL_GEN(to->i915) < 8); - /* Just emit the first semaphore we see as request space is limited. */ - if (already_busywaiting(to) & from->engine->mask) - goto await_fence; - - if (i915_request_await_start(to, from) < 0) - goto await_fence; - - /* Only submit our spinner after the signaler is running! */ - if (__i915_request_await_execution(to, from, NULL, gfp)) - goto await_fence; - /* We need to pin the signaler's HWSP until we are finished reading. */ - if (intel_timeline_read_hwsp(from, to, &hwsp_offset)) - goto await_fence; + err = intel_timeline_read_hwsp(from, to, &hwsp_offset); + if (err) + return err; len = 4; if (has_token) @@ -856,7 +859,7 @@ emit_semaphore_wait(struct i915_request *to, MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD) + has_token; - *cs++ = from->fence.seqno; + *cs++ = seqno; *cs++ = hwsp_offset; *cs++ = 0; if (has_token) { @@ -865,6 +868,28 @@ emit_semaphore_wait(struct i915_request *to, } intel_ring_advance(to, cs); + return 0; +} + +static int +emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + gfp_t gfp) +{ + /* Just emit the first semaphore we see as request space is limited. */ + if (already_busywaiting(to) & from->engine->mask) + goto await_fence; + + if (i915_request_await_start(to, from) < 0) + goto await_fence; + + /* Only submit our spinner after the signaler is running! 
*/ + if (__await_execution(to, from, NULL, gfp)) + goto await_fence; + + if (__emit_semaphore_wait(to, from, from->fence.seqno)) + goto await_fence; + to->sched.semaphores |= from->engine->mask; to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; return 0; @@ -892,18 +917,16 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) return ret; } - if (to->engine == from->engine) { + if (to->engine == from->engine) ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, &from->submit, I915_FENCE_GFP); - } else if (intel_engine_has_semaphores(to->engine) && - to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) { + else if (intel_context_use_semaphores(to->context)) ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); - } else { + else ret = i915_sw_fence_await_dma_fence(&to->submit, &from->fence, 0, I915_FENCE_GFP); - } if (ret < 0) return ret; @@ -943,8 +966,10 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) do { fence = *child++; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + i915_sw_fence_set_error_once(&rq->submit, fence->error); continue; + } /* * Requests on the same timeline are explicitly ordered, along @@ -978,6 +1003,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) return 0; } +static bool intel_timeline_sync_has_start(struct intel_timeline *tl, + struct dma_fence *fence) +{ + return __intel_timeline_sync_is_later(tl, + fence->context, + fence->seqno - 1); +} + +static int intel_timeline_sync_set_start(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1); +} + +static int +__i915_request_await_execution(struct i915_request *to, + struct i915_request *from, + void (*hook)(struct i915_request *rq, + struct dma_fence *signal)) +{ + int err; + + /* Submit both requests at the same time */ + err = __await_execution(to, from, hook, I915_FENCE_GFP); + if (err) + return err; + + /* Squash repeated dependencies to the same timelines */ + if (intel_timeline_sync_has_start(i915_request_timeline(to), + &from->fence)) + return 0; + + /* Ensure both start together [after all semaphores in signal] */ + if (intel_engine_has_semaphores(to->engine)) + err = __emit_semaphore_wait(to, from, from->fence.seqno - 1); + else + err = i915_request_await_start(to, from); + if (err < 0) + return err; + + /* Couple the dependency tree for PI on this exposed to->fence */ + if (to->engine->schedule) { + err = i915_sched_node_add_dependency(&to->sched, &from->sched); + if (err < 0) + return err; + } + + return intel_timeline_sync_set_start(i915_request_timeline(to), + &from->fence); +} + int i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence, @@ -1000,8 +1076,10 @@ i915_request_await_execution(struct i915_request *rq, do { fence = *child++; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + i915_sw_fence_set_error_once(&rq->submit, fence->error); continue; + } /* * We don't squash repeated fence dependencies here as we @@ -1011,8 +1089,7 @@ i915_request_await_execution(struct i915_request *rq, if (dma_fence_is_i915(fence)) ret = __i915_request_await_execution(rq, to_request(fence), - hook, - I915_FENCE_GFP); + hook); else ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, I915_FENCE_TIMEOUT, @@ -1178,8 +1255,7 @@ struct i915_request 
*__i915_request_commit(struct i915_request *rq) struct intel_ring *ring = rq->ring; u32 *cs; - GEM_TRACE("%s fence %llx:%lld\n", - engine->name, rq->fence.context, rq->fence.seqno); + RQ_TRACE(rq, "\n"); /* * To ensure that this call will not fail, space for its emissions @@ -1225,8 +1301,8 @@ void __i915_request_queue(struct i915_request *rq, void i915_request_add(struct i915_request *rq) { - struct i915_sched_attr attr = rq->gem_context->sched; struct intel_timeline * const tl = i915_request_timeline(rq); + struct i915_sched_attr attr = {}; struct i915_request *prev; lockdep_assert_held(&tl->mutex); @@ -1236,6 +1312,9 @@ void i915_request_add(struct i915_request *rq) prev = __i915_request_commit(rq); + if (rcu_access_pointer(rq->context->gem_context)) + attr = i915_request_gem_context(rq)->sched; + /* * Boost actual workloads past semaphores! * @@ -1446,7 +1525,7 @@ long i915_request_wait(struct i915_request *rq, * completion. That requires having a good predictor for the request * duration, which we currently lack. */ - if (CONFIG_DRM_I915_SPIN_REQUEST && + if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) && __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) { dma_fence_signal(&rq->fence); goto out; @@ -1466,7 +1545,7 @@ long i915_request_wait(struct i915_request *rq, */ if (flags & I915_WAIT_PRIORITY) { if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6) - gen6_rps_boost(rq); + intel_rps_boost(rq); i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); } @@ -1500,7 +1579,7 @@ long i915_request_wait(struct i915_request *rq, dma_fence_remove_callback(&rq->fence, &wait.cb); out: - mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_); + mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); trace_i915_request_wait_end(rq); return timeout; } @@ -1531,10 +1610,14 @@ static struct i915_global_request global = { { int __init i915_global_request_init(void) { - global.slab_requests = KMEM_CACHE(i915_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); + global.slab_requests = + kmem_cache_create("i915_request", + sizeof(struct i915_request), + __alignof__(struct i915_request), + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU, + __i915_request_ctor); if (!global.slab_requests) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 96991d64759c..565322640378 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -28,8 +28,10 @@ #include <linux/dma-fence.h> #include <linux/lockdep.h> +#include "gem/i915_gem_context_types.h" #include "gt/intel_context_types.h" #include "gt/intel_engine_types.h" +#include "gt/intel_timeline_types.h" #include "i915_gem.h" #include "i915_scheduler.h" @@ -41,14 +43,19 @@ struct drm_file; struct drm_i915_gem_object; struct i915_request; -struct intel_timeline; -struct intel_timeline_cacheline; struct i915_capture_list { struct i915_capture_list *next; struct i915_vma *vma; }; +#define RQ_TRACE(rq, fmt, ...) do { \ + const struct i915_request *rq__ = (rq); \ + ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt, \ + rq__->fence.context, rq__->fence.seqno, \ + hwsp_seqno(rq__), ##__VA_ARGS__); \ +} while (0) + enum { /* * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW. @@ -109,9 +116,8 @@ struct i915_request { * i915_request_free() will then decrement the refcount on the * context. 
*/ - struct i915_gem_context *gem_context; struct intel_engine_cs *engine; - struct intel_context *hw_context; + struct intel_context *context; struct intel_ring *ring; struct intel_timeline __rcu *timeline; struct list_head signal_link; @@ -144,6 +150,10 @@ struct i915_request { union { wait_queue_entry_t submitq; struct i915_sw_dma_fence_cb dmaq; + struct i915_request_duration_cb { + struct dma_fence_cb cb; + ktime_t emitted; + } duration; }; struct list_head execute_cb; struct i915_sw_fence semaphore; @@ -176,7 +186,7 @@ struct i915_request { * inside the timeline's HWSP vma, but it is only valid while this * request has not completed and guarded by the timeline mutex. */ - struct intel_timeline_cacheline *hwsp_cacheline; + struct intel_timeline_cacheline __rcu *hwsp_cacheline; /** Position in the ring of the start of the request */ u32 head; @@ -454,6 +464,13 @@ i915_request_timeline(struct i915_request *rq) lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex)); } +static inline struct i915_gem_context * +i915_request_gem_context(struct i915_request *rq) +{ + /* Valid only while the request is being constructed (or retired). */ + return rcu_dereference_protected(rq->context->gem_context, true); +} + static inline struct intel_timeline * i915_request_active_timeline(struct i915_request *rq) { diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 0ca40f6bf08c..bf87c70bfdd9 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -189,22 +189,39 @@ static inline bool need_preempt(int prio, int active) return prio >= max(I915_PRIORITY_NORMAL, active); } -static void kick_submission(struct intel_engine_cs *engine, int prio) +static void kick_submission(struct intel_engine_cs *engine, + const struct i915_request *rq, + int prio) { - const struct i915_request *inflight = - execlists_active(&engine->execlists); + const struct i915_request *inflight; /* - * If we are already the currently executing context, don't - * bother evaluating if we should preempt ourselves, or if - * we expect nothing to change as a result of running the - * tasklet, i.e. we have not change the priority queue - * sufficiently to oust the running context. + * We only need to kick the tasklet once for the high priority + * new context we add into the queue. */ - if (!inflight || !need_preempt(prio, rq_prio(inflight))) + if (prio <= engine->execlists.queue_priority_hint) return; - tasklet_hi_schedule(&engine->execlists.tasklet); + rcu_read_lock(); + + /* Nothing currently active? We're overdue for a submission! */ + inflight = execlists_active(&engine->execlists); + if (!inflight) + goto unlock; + + /* + * If we are already the currently executing context, don't + * bother evaluating if we should preempt ourselves. + */ + if (inflight->context == rq->context) + goto unlock; + + engine->execlists.queue_priority_hint = prio; + if (need_preempt(prio, rq_prio(inflight))) + tasklet_hi_schedule(&engine->execlists.tasklet); + +unlock: + rcu_read_unlock(); } static void __i915_schedule(struct i915_sched_node *node, @@ -330,13 +347,8 @@ static void __i915_schedule(struct i915_sched_node *node, list_move_tail(&node->link, cache.priolist); } - if (prio <= engine->execlists.queue_priority_hint) - continue; - - engine->execlists.queue_priority_hint = prio; - /* Defer (tasklet) submission until after all of our updates. 
*/ - kick_submission(engine, prio); + kick_submission(engine, node_to_request(node), prio); } spin_unlock(&engine->active.lock); @@ -375,9 +387,19 @@ void i915_sched_node_init(struct i915_sched_node *node) INIT_LIST_HEAD(&node->signalers_list); INIT_LIST_HEAD(&node->waiters_list); INIT_LIST_HEAD(&node->link); + + i915_sched_node_reinit(node); +} + +void i915_sched_node_reinit(struct i915_sched_node *node) +{ node->attr.priority = I915_PRIORITY_INVALID; node->semaphores = 0; node->flags = 0; + + GEM_BUG_ON(!list_empty(&node->signalers_list)); + GEM_BUG_ON(!list_empty(&node->waiters_list)); + GEM_BUG_ON(!list_empty(&node->link)); } static struct i915_dependency * @@ -462,13 +484,13 @@ void i915_sched_node_fini(struct i915_sched_node *node) * so we may be called out-of-order. */ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { - GEM_BUG_ON(!node_signaled(dep->signaler)); GEM_BUG_ON(!list_empty(&dep->dfs_link)); list_del(&dep->wait_link); if (dep->flags & I915_DEPENDENCY_ALLOC) i915_dependency_free(dep); } + INIT_LIST_HEAD(&node->signalers_list); /* Remove ourselves from everyone who depends upon us */ list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) { @@ -479,6 +501,7 @@ void i915_sched_node_fini(struct i915_sched_node *node) if (dep->flags & I915_DEPENDENCY_ALLOC) i915_dependency_free(dep); } + INIT_LIST_HEAD(&node->waiters_list); spin_unlock_irq(&schedule_lock); } diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 07d243acf553..d1dc4efef77b 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -26,6 +26,7 @@ sched.link) void i915_sched_node_init(struct i915_sched_node *node); +void i915_sched_node_reinit(struct i915_sched_node *node); bool __i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal, diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index 4d88205de51b..98bcb6fa0ab4 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -36,6 +36,7 @@ struct i915_selftest { char *filter; int mock; int live; + int perf; }; #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) @@ -45,6 +46,7 @@ extern struct i915_selftest i915_selftest; int i915_mock_selftests(void); int i915_live_selftests(struct pci_dev *pdev); +int i915_perf_selftests(struct pci_dev *pdev); /* We extract the function declarations from i915_mock_selftests.h and * i915_live_selftests.h Add your unit test declarations there! 
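The i915_selftest.h hunk below threads the new perf selftests through the driver's X-macro test lists: each *_selftests.h header is included several times under different definitions of selftest(), once to generate forward declarations and again elsewhere to build the dispatch tables the runner walks. A generic sketch of the technique, using hypothetical file and test names rather than the i915 ones:

/*
 * tests.h (hypothetical) holds nothing but the list:
 *
 *	selftest(sanity, test_sanity)
 *	selftest(requests, test_requests)
 */

/* First expansion: forward declarations for every test. */
#define selftest(name, func) int func(void);
#include "tests.h"
#undef selftest

/* Second expansion: a table the test runner can iterate. */
struct subtest {
	const char *name;
	int (*run)(void);
};

static const struct subtest subtests[] = {
#define selftest(name, func) { #name, func },
#include "tests.h"
#undef selftest
};
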
@@ -61,6 +63,7 @@ int i915_live_selftests(struct pci_dev *pdev); #undef selftest #define selftest(name, func) int func(struct drm_i915_private *i915); #include "selftests/i915_live_selftests.h" +#include "selftests/i915_perf_selftests.h" #undef selftest struct i915_subtest { @@ -109,6 +112,7 @@ int __i915_subtests(const char *caller, static inline int i915_mock_selftests(void) { return 0; } static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; } +static inline int i915_perf_selftests(struct pci_dev *pdev) { return 0; } #define I915_SELFTEST_DECLARE(x) #define I915_SELFTEST_ONLY(x) 0 diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6a88db291252..51ba97daf2a0 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -12,6 +12,12 @@ #include "i915_sw_fence.h" #include "i915_selftest.h" +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) +#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr) +#else +#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + #define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */ static DEFINE_SPINLOCK(i915_sw_fence_lock); @@ -218,13 +224,21 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, { BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK); + __init_waitqueue_head(&fence->wait, name, key); + fence->flags = (unsigned long)fn; + + i915_sw_fence_reinit(fence); +} + +void i915_sw_fence_reinit(struct i915_sw_fence *fence) +{ debug_fence_init(fence); - __init_waitqueue_head(&fence->wait, name, key); atomic_set(&fence->pending, 1); fence->error = 0; - fence->flags = (unsigned long)fn; + I915_SW_FENCE_BUG_ON(!fence->flags); + I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head)); } void i915_sw_fence_commit(struct i915_sw_fence *fence) @@ -414,8 +428,10 @@ static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma, struct i915_sw_fence *fence; fence = xchg(&cb->base.fence, NULL); - if (fence) + if (fence) { + i915_sw_fence_set_error_once(fence, dma->error); i915_sw_fence_complete(fence); + } irq_work_queue(&cb->work); } @@ -443,8 +459,10 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, debug_fence_assert(fence); might_sleep_if(gfpflags_allow_blocking(gfp)); - if (dma_fence_is_signaled(dma)) + if (dma_fence_is_signaled(dma)) { + i915_sw_fence_set_error_once(fence, dma->error); return 0; + } cb = kmalloc(timeout ? 
sizeof(struct i915_sw_dma_fence_cb_timer) : @@ -454,7 +472,12 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, if (!gfpflags_allow_blocking(gfp)) return -ENOMEM; - return dma_fence_wait(dma, false); + ret = dma_fence_wait(dma, false); + if (ret) + return ret; + + i915_sw_fence_set_error_once(fence, dma->error); + return 0; } cb->fence = fence; @@ -504,8 +527,10 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, debug_fence_assert(fence); - if (dma_fence_is_signaled(dma)) + if (dma_fence_is_signaled(dma)) { + i915_sw_fence_set_error_once(fence, dma->error); return 0; + } cb->fence = fence; i915_sw_fence_await(fence); @@ -539,8 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct dma_fence **shared; unsigned int count, i; - ret = dma_resv_get_fences_rcu(resv, - &excl, &count, &shared); + ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index ab7d58bd0b9d..19e806ce43bc 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -54,6 +54,8 @@ do { \ __i915_sw_fence_init((fence), (fn), NULL, NULL) #endif +void i915_sw_fence_reinit(struct i915_sw_fence *fence); + #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS void i915_sw_fence_fini(struct i915_sw_fence *fence); #else @@ -110,7 +112,8 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence) static inline void i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error) { - cmpxchg(&fence->error, 0, error); + if (unlikely(error)) + cmpxchg(&fence->error, 0, error); } #endif /* _I915_SW_FENCE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c index 07552cd544f2..997b2998f1f2 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c @@ -6,6 +6,13 @@ #include "i915_sw_fence_work.h" +static void fence_complete(struct dma_fence_work *f) +{ + if (f->ops->release) + f->ops->release(f); + dma_fence_signal(&f->dma); +} + static void fence_work(struct work_struct *work) { struct dma_fence_work *f = container_of(work, typeof(*f), work); @@ -14,7 +21,8 @@ static void fence_work(struct work_struct *work) err = f->ops->work(f); if (err) dma_fence_set_error(&f->dma, err); - dma_fence_signal(&f->dma); + + fence_complete(f); dma_fence_put(&f->dma); } @@ -32,7 +40,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) dma_fence_get(&f->dma); queue_work(system_unbound_wq, &f->work); } else { - dma_fence_signal(&f->dma); + fence_complete(f); } break; @@ -60,9 +68,6 @@ static void fence_release(struct dma_fence *fence) { struct dma_fence_work *f = container_of(fence, typeof(*f), dma); - if (f->ops->release) - f->ops->release(f); - i915_sw_fence_fini(&f->chain); BUILD_BUG_ON(offsetof(typeof(*f), dma)); @@ -78,12 +83,11 @@ static const struct dma_fence_ops fence_ops = { void dma_fence_work_init(struct dma_fence_work *f, const struct dma_fence_work_ops *ops) { + f->ops = ops; spin_lock_init(&f->lock); dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0); i915_sw_fence_init(&f->chain, fence_notify); INIT_WORK(&f->work, fence_work); - - f->ops = ops; } int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index bf039b8ba593..ad2b1b833d7b 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ 
b/drivers/gpu/drm/i915/i915_sysfs.c @@ -31,6 +31,7 @@ #include <linux/sysfs.h> #include "gt/intel_rc6.h" +#include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_sysfs.h" @@ -258,44 +259,30 @@ static const struct bin_attribute dpf_attrs_1 = { static ssize_t gt_act_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - intel_wakeref_t wakeref; - u32 freq; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - vlv_punit_get(dev_priv); - freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - vlv_punit_put(dev_priv); - - freq = (freq >> 8) & 0xff; - } else { - freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); - } - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &i915->gt.rps; - return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq)); + return snprintf(buf, PAGE_SIZE, "%d\n", + intel_rps_read_actual_frequency(rps)); } static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &i915->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.cur_freq)); + intel_gpu_freq(rps, rps->cur_freq)); } static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &i915->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.boost_freq)); + intel_gpu_freq(rps, rps->boost_freq)); } static ssize_t gt_boost_freq_mhz_store(struct device *kdev, @@ -303,7 +290,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; bool boost = false; ssize_t ret; u32 val; @@ -313,7 +300,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, return ret; /* Validate against (static) hardware limits */ - val = intel_freq_opcode(dev_priv, val); + val = intel_freq_opcode(rps, val); if (val < rps->min_freq || val > rps->max_freq) return -EINVAL; @@ -333,19 +320,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.efficient_freq)); + intel_gpu_freq(rps, rps->efficient_freq)); } static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.max_freq_softlimit)); + intel_gpu_freq(rps, rps->max_freq_softlimit)); } static ssize_t gt_max_freq_mhz_store(struct device *kdev, @@ -353,19 +340,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = 
kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; - intel_wakeref_t wakeref; - u32 val; + struct intel_rps *rps = &dev_priv->gt.rps; ssize_t ret; + u32 val; ret = kstrtou32(buf, 0, &val); if (ret) return ret; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); - val = intel_freq_opcode(dev_priv, val); + val = intel_freq_opcode(rps, val); if (val < rps->min_freq || val > rps->max_freq || val < rps->min_freq_softlimit) { @@ -375,7 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (val > rps->rp0_freq) DRM_DEBUG("User requested overclocking to %d\n", - intel_gpu_freq(dev_priv, val)); + intel_gpu_freq(rps, val)); rps->max_freq_softlimit = val; @@ -383,14 +368,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, rps->min_freq_softlimit, rps->max_freq_softlimit); - /* We still need *_set_rps to process the new max_delay and + /* + * We still need *_set_rps to process the new max_delay and * update the interrupt limits and PMINTRMSK even though - * frequency request may be unchanged. */ - ret = intel_set_rps(dev_priv, val); + * frequency request may be unchanged. + */ + intel_rps_set(rps, val); unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -398,10 +384,10 @@ unlock: static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.min_freq_softlimit)); + intel_gpu_freq(rps, rps->min_freq_softlimit)); } static ssize_t gt_min_freq_mhz_store(struct device *kdev, @@ -409,19 +395,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; - intel_wakeref_t wakeref; - u32 val; + struct intel_rps *rps = &dev_priv->gt.rps; ssize_t ret; + u32 val; ret = kstrtou32(buf, 0, &val); if (ret) return ret; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); - val = intel_freq_opcode(dev_priv, val); + val = intel_freq_opcode(rps, val); if (val < rps->min_freq || val > rps->max_freq || val > rps->max_freq_softlimit) { @@ -435,14 +419,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, rps->min_freq_softlimit, rps->max_freq_softlimit); - /* We still need *_set_rps to process the new min_delay and + /* + * We still need *_set_rps to process the new min_delay and * update the interrupt limits and PMINTRMSK even though - * frequency request may be unchanged. */ - ret = intel_set_rps(dev_priv, val); + * frequency request may be unchanged. 
+ */ + intel_rps_set(rps, val); unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -464,15 +449,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; u32 val; if (attr == &dev_attr_gt_RP0_freq_mhz) - val = intel_gpu_freq(dev_priv, rps->rp0_freq); + val = intel_gpu_freq(rps, rps->rp0_freq); else if (attr == &dev_attr_gt_RP1_freq_mhz) - val = intel_gpu_freq(dev_priv, rps->rp1_freq); + val = intel_gpu_freq(rps, rps->rp1_freq); else if (attr == &dev_attr_gt_RPn_freq_mhz) - val = intel_gpu_freq(dev_priv, rps->min_freq); + val = intel_gpu_freq(rps, rps->min_freq); else BUG(); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 7ef7a1e1664c..233a97a2c276 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -341,7 +341,7 @@ TRACE_EVENT(intel_disable_plane, /* pipe updates */ -TRACE_EVENT(i915_pipe_update_start, +TRACE_EVENT(intel_pipe_update_start, TP_PROTO(struct intel_crtc *crtc), TP_ARGS(crtc), @@ -366,7 +366,7 @@ TRACE_EVENT(i915_pipe_update_start, __entry->scanline, __entry->min, __entry->max) ); -TRACE_EVENT(i915_pipe_update_vblank_evaded, +TRACE_EVENT(intel_pipe_update_vblank_evaded, TP_PROTO(struct intel_crtc *crtc), TP_ARGS(crtc), @@ -391,7 +391,7 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded, __entry->scanline, __entry->min, __entry->max) ); -TRACE_EVENT(i915_pipe_update_end, +TRACE_EVENT(intel_pipe_update_end, TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), TP_ARGS(crtc, frame, scanline_end), diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index f66540e15793..c47261ae86ea 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -54,25 +54,54 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) static unsigned int i915_probe_fail_count; -int __i915_inject_load_error(struct drm_i915_private *i915, int err, - const char *func, int line) +int __i915_inject_probe_error(struct drm_i915_private *i915, int err, + const char *func, int line) { - if (i915_probe_fail_count >= i915_modparams.inject_load_failure) + if (i915_probe_fail_count >= i915_modparams.inject_probe_failure) return 0; - if (++i915_probe_fail_count < i915_modparams.inject_load_failure) + if (++i915_probe_fail_count < i915_modparams.inject_probe_failure) return 0; __i915_printk(i915, KERN_INFO, "Injecting failure %d at checkpoint %u [%s:%d]\n", - err, i915_modparams.inject_load_failure, func, line); - i915_modparams.inject_load_failure = 0; + err, i915_modparams.inject_probe_failure, func, line); + i915_modparams.inject_probe_failure = 0; return err; } bool i915_error_injected(void) { - return i915_probe_fail_count && !i915_modparams.inject_load_failure; + return i915_probe_fail_count && !i915_modparams.inject_probe_failure; } #endif + +void cancel_timer(struct timer_list *t) +{ + if (!READ_ONCE(t->expires)) + return; + + del_timer(t); + WRITE_ONCE(t->expires, 0); +} + +void set_timer_ms(struct timer_list *t, unsigned long timeout) +{ + if (!timeout) { + cancel_timer(t); + return; + } + + timeout = msecs_to_jiffies_timeout(timeout); + + /* + * Paranoia to make sure the compiler computes the 
timeout before + * loading 'jiffies' as jiffies is volatile and may be updated in + * the background by a timer tick. All to reduce the complexity + * of the addition and reduce the risk of losing a jiffie. + */ + barrier(); + + mod_timer(t, jiffies + timeout); +} diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 562f756da421..b0ade76bec90 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -32,6 +32,7 @@ #include <linux/workqueue.h> struct drm_i915_private; +struct timer_list; #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -60,20 +61,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -int __i915_inject_load_error(struct drm_i915_private *i915, int err, - const char *func, int line); -#define i915_inject_load_error(_i915, _err) \ - __i915_inject_load_error((_i915), (_err), __func__, __LINE__) +int __i915_inject_probe_error(struct drm_i915_private *i915, int err, + const char *func, int line); +#define i915_inject_probe_error(_i915, _err) \ + __i915_inject_probe_error((_i915), (_err), __func__, __LINE__) bool i915_error_injected(void); #else -#define i915_inject_load_error(_i915, _err) 0 +#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; }) #define i915_error_injected() false #endif -#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV) +#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV) #define i915_probe_error(i915, fmt, ...) \ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ @@ -421,4 +422,25 @@ static inline void add_taint_for_CI(unsigned int taint) add_taint(taint, LOCKDEP_STILL_OK); } +void cancel_timer(struct timer_list *t); +void set_timer_ms(struct timer_list *t, unsigned long timeout); + +static inline bool timer_expired(const struct timer_list *t) +{ + return READ_ONCE(t->expires) && !timer_pending(t); +} + +/* + * This is a lookalike for IS_ENABLED() that takes a kconfig value, + * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero + * i.e. whether the configuration is active. Wrapping up the config inside + * a boolean context prevents clang and smatch from complaining about potential + * issues in confusing logical-&& with bitwise-& for constants. + * + * Sadly IS_ENABLED() itself does not work with kconfig values. + * + * Returns 0 if @config is 0, 1 if set to any value. + */ +#define IS_ACTIVE(config) ((config) != 0) + #endif /* !__I915_UTILS_H */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e90c4d0af8fd..cbd783c31adb 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -28,7 +28,9 @@ #include "display/intel_frontbuffer.h" #include "gt/intel_engine.h" +#include "gt/intel_engine_heartbeat.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" #include "i915_drv.h" #include "i915_globals.h" @@ -106,12 +108,13 @@ vma_create(struct drm_i915_gem_object *obj, struct rb_node *rb, **p; /* The aliasing_ppgtt should never be used directly! 
*/ - GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); + GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); vma = i915_vma_alloc(); if (vma == NULL) return ERR_PTR(-ENOMEM); + kref_init(&vma->ref); mutex_init(&vma->pages_mutex); vma->vm = i915_vm_get(vm); vma->ops = &vm->vma_ops; @@ -290,6 +293,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_vma_work { struct dma_fence_work base; struct i915_vma *vma; + struct drm_i915_gem_object *pinned; enum i915_cache_level cache_level; unsigned int flags; }; @@ -304,15 +308,21 @@ static int __vma_bind(struct dma_fence_work *work) if (err) atomic_or(I915_VMA_ERROR, &vma->flags); - if (vma->obj) - __i915_gem_object_unpin_pages(vma->obj); - return err; } +static void __vma_release(struct dma_fence_work *work) +{ + struct i915_vma_work *vw = container_of(work, typeof(*vw), base); + + if (vw->pinned) + __i915_gem_object_unpin_pages(vw->pinned); +} + static const struct dma_fence_work_ops bind_ops = { .name = "bind", .work = __vma_bind, + .release = __vma_release, }; struct i915_vma_work *i915_vma_work(void) @@ -393,8 +403,10 @@ int i915_vma_bind(struct i915_vma *vma, i915_active_set_exclusive(&vma->active, &work->base.dma); work->base.dma.error = 0; /* enable the queue_work() */ - if (vma->obj) + if (vma->obj) { __i915_gem_object_pin_pages(vma->obj); + work->pinned = vma->obj; + } } else { GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags); ret = vma->ops->bind_vma(vma, cache_level, bind_flags); @@ -412,7 +424,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) int err; /* Access through the GTT requires the device to be awake. */ - assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm); + assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm); if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { err = -ENODEV; goto err; @@ -700,41 +712,35 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); - list_add_tail(&vma->vm_link, &vma->vm->bound_list); - if (vma->obj) { - atomic_inc(&vma->obj->bind_count); - assert_bind_count(vma->obj); + struct drm_i915_gem_object *obj = vma->obj; + + atomic_inc(&obj->bind_count); + assert_bind_count(obj); } + list_add_tail(&vma->vm_link, &vma->vm->bound_list); return 0; } static void -i915_vma_remove(struct i915_vma *vma) +i915_vma_detach(struct i915_vma *vma) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); - list_del(&vma->vm_link); - /* - * Since the unbound list is global, only move to that list if - * no more VMAs exist. + * And finally now the object is completely decoupled from this + * vma, we can drop its hold on the backing storage and allow + * it to be reaped by the shrinker. */ + list_del(&vma->vm_link); if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; - /* - * And finally now the object is completely decoupled from this - * vma, we can drop its hold on the backing storage and allow - * it to be reaped by the shrinker. 
- */ - atomic_dec(&obj->bind_count); assert_bind_count(obj); + atomic_dec(&obj->bind_count); } - - drm_mm_remove_node(&vma->node); } static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) @@ -929,8 +935,10 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); err_remove: - if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) - i915_vma_remove(vma); + if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { + i915_vma_detach(vma); + drm_mm_remove_node(&vma->node); + } err_active: i915_active_release(&vma->active); err_unlock: @@ -943,9 +951,41 @@ err_pages: return err; } +static void flush_idle_contexts(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + intel_engine_flush_barriers(engine); + + intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); +} + +int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) +{ + struct i915_address_space *vm = vma->vm; + int err; + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + + do { + err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); + if (err != -ENOSPC) + return err; + + /* Unlike i915_vma_pin, we don't take no for an answer! */ + flush_idle_contexts(vm->gt); + if (mutex_lock_interruptible(&vm->mutex) == 0) { + i915_gem_evict_vm(vm); + mutex_unlock(&vm->mutex); + } + } while (1); +} + void i915_vma_close(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; + struct intel_gt *gt = vma->vm->gt; unsigned long flags; GEM_BUG_ON(i915_vma_is_closed(vma)); @@ -962,18 +1002,18 @@ void i915_vma_close(struct i915_vma *vma) * causing us to rebind the VMA once more. This ends up being a lot * of wasted work for the steady state. */ - spin_lock_irqsave(&i915->gt.closed_lock, flags); - list_add(&vma->closed_link, &i915->gt.closed_vma); - spin_unlock_irqrestore(&i915->gt.closed_lock, flags); + spin_lock_irqsave(>->closed_lock, flags); + list_add(&vma->closed_link, >->closed_vma); + spin_unlock_irqrestore(>->closed_lock, flags); } static void __i915_vma_remove_closed(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; + struct intel_gt *gt = vma->vm->gt; - spin_lock_irq(&i915->gt.closed_lock); + spin_lock_irq(>->closed_lock); list_del_init(&vma->closed_link); - spin_unlock_irq(&i915->gt.closed_lock); + spin_unlock_irq(>->closed_lock); } void i915_vma_reopen(struct i915_vma *vma) @@ -982,8 +1022,10 @@ void i915_vma_reopen(struct i915_vma *vma) __i915_vma_remove_closed(vma); } -void i915_vma_destroy(struct i915_vma *vma) +void i915_vma_release(struct kref *ref) { + struct i915_vma *vma = container_of(ref, typeof(*vma), ref); + if (drm_mm_node_allocated(&vma->node)) { mutex_lock(&vma->vm->mutex); atomic_and(~I915_VMA_PIN_MASK, &vma->flags); @@ -1009,12 +1051,12 @@ void i915_vma_destroy(struct i915_vma *vma) i915_vma_free(vma); } -void i915_vma_parked(struct drm_i915_private *i915) +void i915_vma_parked(struct intel_gt *gt) { struct i915_vma *vma, *next; - spin_lock_irq(&i915->gt.closed_lock); - list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) { + spin_lock_irq(>->closed_lock); + list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) { struct drm_i915_gem_object *obj = vma->obj; struct i915_address_space *vm = vma->vm; @@ -1023,26 +1065,28 @@ void i915_vma_parked(struct drm_i915_private *i915) if (!kref_get_unless_zero(&obj->base.refcount)) continue; - if (!i915_vm_tryopen(vm)) { + if (i915_vm_tryopen(vm)) { + 
list_del_init(&vma->closed_link); + } else { i915_gem_object_put(obj); obj = NULL; } - spin_unlock_irq(&i915->gt.closed_lock); + spin_unlock_irq(>->closed_lock); if (obj) { - i915_vma_destroy(vma); + __i915_vma_put(vma); i915_gem_object_put(obj); } i915_vm_close(vm); /* Restart after dropping lock */ - spin_lock_irq(&i915->gt.closed_lock); - next = list_first_entry(&i915->gt.closed_vma, + spin_lock_irq(>->closed_lock); + next = list_first_entry(>->closed_vma, typeof(*next), closed_link); } - spin_unlock_irq(&i915->gt.closed_lock); + spin_unlock_irq(>->closed_lock); } static void __i915_vma_iounmap(struct i915_vma *vma) @@ -1058,17 +1102,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma) void i915_vma_revoke_mmap(struct i915_vma *vma) { - struct drm_vma_offset_node *node = &vma->obj->base.vma_node; + struct drm_vma_offset_node *node; u64 vma_offset; - lockdep_assert_held(&vma->vm->mutex); - if (!i915_vma_has_userfault(vma)) return; GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); GEM_BUG_ON(!vma->obj->userfault_count); + node = &vma->mmo->vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, drm_vma_node_offset_addr(node) + vma_offset, @@ -1108,8 +1151,14 @@ int i915_vma_move_to_active(struct i915_vma *vma, return err; if (flags & EXEC_OBJECT_WRITE) { - if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS)) - i915_active_add_request(&obj->frontbuffer->write, rq); + struct intel_frontbuffer *front; + + front = __intel_frontbuffer_get(obj); + if (unlikely(front)) { + if (intel_frontbuffer_invalidate(front, ORIGIN_CS)) + i915_active_add_request(&front->write, rq); + intel_frontbuffer_put(front); + } dma_resv_add_excl_fence(vma->resv, &rq->fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; @@ -1150,7 +1199,7 @@ int __i915_vma_unbind(struct i915_vma *vma) GEM_BUG_ON(i915_vma_is_active(vma)); if (i915_vma_is_pinned(vma)) { vma_print_allocator(vma, "is pinned"); - return -EBUSY; + return -EAGAIN; } GEM_BUG_ON(i915_vma_is_active(vma)); @@ -1187,9 +1236,10 @@ int __i915_vma_unbind(struct i915_vma *vma) } atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags); + i915_vma_detach(vma); vma_unbind_pages(vma); - i915_vma_remove(vma); + drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ return 0; } @@ -1198,6 +1248,9 @@ int i915_vma_unbind(struct i915_vma *vma) struct i915_address_space *vm = vma->vm; int err; + if (!drm_mm_node_allocated(&vma->node)) + return 0; + err = mutex_lock_interruptible(&vm->mutex); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 858908e3d1cc..5fffa3c58908 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -51,18 +51,26 @@ enum i915_cache_level; */ struct i915_vma { struct drm_mm_node node; - struct drm_i915_gem_object *obj; + struct i915_address_space *vm; const struct i915_vma_ops *ops; - struct i915_fence_reg *fence; + + struct drm_i915_gem_object *obj; struct dma_resv *resv; /** Alias of obj->resv */ + struct sg_table *pages; void __iomem *iomap; void *private; /* owned by creator */ + + struct i915_fence_reg *fence; + u64 size; u64 display_alignment; struct i915_page_sizes page_sizes; + /* mmap-offset associated with fencing for this vma */ + struct i915_mmap_offset *mmo; + u32 fence_size; u32 fence_alignment; @@ -71,6 +79,7 @@ struct i915_vma { * handles (but same file) for execbuf, i.e. the number of aliases * that exist in the ctx->handle_vmas LUT for this vma. 
*/ + struct kref ref; atomic_t open_count; atomic_t flags; /** @@ -333,7 +342,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma); void i915_vma_unlink_ctx(struct i915_vma *vma); void i915_vma_close(struct i915_vma *vma); void i915_vma_reopen(struct i915_vma *vma); -void i915_vma_destroy(struct i915_vma *vma); + +static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma) +{ + if (kref_get_unless_zero(&vma->ref)) + return vma; + + return NULL; +} + +void i915_vma_release(struct kref *ref); +static inline void __i915_vma_put(struct i915_vma *vma) +{ + kref_put(&vma->ref, i915_vma_release); +} #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) @@ -349,6 +371,7 @@ static inline void i915_vma_unlock(struct i915_vma *vma) int __must_check i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); +int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags); static inline int i915_vma_pin_count(const struct i915_vma *vma) { @@ -462,7 +485,7 @@ i915_vma_unpin_fence(struct i915_vma *vma) __i915_vma_unpin_fence(vma); } -void i915_vma_parked(struct drm_i915_private *i915); +void i915_vma_parked(struct intel_gt *gt); #define for_each_until(cond) if (cond) break; else diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 85e480bdc673..1acb5db77431 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -73,9 +73,30 @@ const char *intel_platform_name(enum intel_platform platform) return platform_names[platform]; } -void intel_device_info_dump_flags(const struct intel_device_info *info, - struct drm_printer *p) +static const char *iommu_name(void) { + const char *msg = "n/a"; + +#ifdef CONFIG_INTEL_IOMMU + msg = enableddisabled(intel_iommu_gfx_mapped); +#endif + + return msg; +} + +void intel_device_info_print_static(const struct intel_device_info *info, + struct drm_printer *p) +{ + drm_printf(p, "engines: %x\n", info->engine_mask); + drm_printf(p, "gen: %d\n", info->gen); + drm_printf(p, "gt: %d\n", info->gt); + drm_printf(p, "iommu: %s\n", iommu_name()); + drm_printf(p, "memory-regions: %x\n", info->memory_regions); + drm_printf(p, "page-sizes: %x\n", info->page_sizes); + drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); + drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size); + drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type); + #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG @@ -106,8 +127,8 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); } -void intel_device_info_dump_runtime(const struct intel_runtime_info *info, - struct drm_printer *p) +void intel_device_info_print_runtime(const struct intel_runtime_info *info, + struct drm_printer *p) { sseu_dump(&info->sseu, p); @@ -148,8 +169,8 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, } } -void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, - struct drm_printer *p) +void intel_device_info_print_topology(const struct sseu_dev_info *sseu, + struct drm_printer *p) { int s, ss; @@ -808,6 +829,8 @@ static const u16 subplatform_ult_ids[] = { INTEL_WHL_U_GT1_IDS(0), INTEL_WHL_U_GT2_IDS(0), INTEL_WHL_U_GT3_IDS(0), + INTEL_CML_U_GT1_IDS(0), + INTEL_CML_U_GT2_IDS(0), }; static const u16 subplatform_ulx_ids[] = { @@ -981,6 +1004,19 @@ void 
intel_device_info_runtime_init(struct drm_i915_private *dev_priv) enabled_mask); else info->pipe_mask = enabled_mask; + + if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) + info->display.has_hdcp = 0; + + if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) + info->display.has_fbc = 0; + + if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) + info->display.has_csr = 0; + + if (INTEL_GEN(dev_priv) >= 10 && + (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE)) + info->display.has_dsc = 0; } /* Initialize slice/subslice/EU info */ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index e9940f932d26..2725cb7fc169 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -107,6 +107,7 @@ enum intel_ppgtt_type { func(is_mobile); \ func(is_lp); \ func(require_force_probe); \ + func(is_dgfx); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ func(gpu_reset_clobbers_display); \ @@ -136,8 +137,10 @@ enum intel_ppgtt_type { func(has_ddi); \ func(has_dp_mst); \ func(has_dsb); \ + func(has_dsc); \ func(has_fbc); \ func(has_gmch); \ + func(has_hdcp); \ func(has_hotplug); \ func(has_ipc); \ func(has_modular_fia); \ @@ -227,12 +230,13 @@ const char *intel_platform_name(enum intel_platform platform); void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv); void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); -void intel_device_info_dump_flags(const struct intel_device_info *info, - struct drm_printer *p); -void intel_device_info_dump_runtime(const struct intel_runtime_info *info, + +void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p); -void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, +void intel_device_info_print_runtime(const struct intel_runtime_info *info, struct drm_printer *p); +void intel_device_info_print_topology(const struct sseu_dev_info *sseu, + struct drm_printer *p); void intel_device_info_init_mmio(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 72f98a111de1..e24c280e5930 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -73,6 +73,9 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, min_order = ilog2(size) - ilog2(mem->mm.chunk_size); } + if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size) + return -E2BIG; + n_pages = size >> ilog2(mem->mm.chunk_size); mutex_lock(&mem->mm_lock); @@ -207,6 +210,65 @@ void intel_memory_region_put(struct intel_memory_region *mem) kref_put(&mem->kref, __intel_memory_region_destroy); } +/* Global memory region registration -- only slight layer inversions! 
*/ + +int intel_memory_regions_hw_probe(struct drm_i915_private *i915) +{ + int err, i; + + for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { + struct intel_memory_region *mem = ERR_PTR(-ENODEV); + u32 type; + + if (!HAS_REGION(i915, BIT(i))) + continue; + + type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]); + switch (type) { + case INTEL_MEMORY_SYSTEM: + mem = i915_gem_shmem_setup(i915); + break; + case INTEL_MEMORY_STOLEN: + mem = i915_gem_stolen_setup(i915); + break; + case INTEL_MEMORY_LOCAL: + mem = intel_setup_fake_lmem(i915); + break; + } + + if (IS_ERR(mem)) { + err = PTR_ERR(mem); + DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type); + goto out_cleanup; + } + + mem->id = intel_region_map[i]; + mem->type = type; + mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]); + + i915->mm.regions[i] = mem; + } + + return 0; + +out_cleanup: + intel_memory_regions_driver_release(i915); + return err; +} + +void intel_memory_regions_driver_release(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { + struct intel_memory_region *region = + fetch_and_zero(&i915->mm.regions[i]); + + if (region) + intel_memory_region_put(region); + } +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/intel_memory_region.c" #include "selftests/mock_region.c" diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 49b059a2be70..238722009677 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -10,6 +10,7 @@ #include <linux/ioport.h> #include <linux/mutex.h> #include <linux/io-mapping.h> +#include <drm/drm_mm.h> #include "i915_buddy.h" @@ -71,6 +72,9 @@ struct intel_memory_region { struct io_mapping iomap; struct resource region; + /* For fake LMEM */ + struct drm_mm_node fake_mappable; + struct i915_buddy_mm mm; struct mutex mm_lock; @@ -83,6 +87,8 @@ struct intel_memory_region { unsigned int instance; unsigned int id; + dma_addr_t remap_addr; + struct { struct mutex lock; /* Protects access to objects */ struct list_head list; @@ -117,4 +123,7 @@ struct intel_memory_region * intel_memory_region_get(struct intel_memory_region *mem); void intel_memory_region_put(struct intel_memory_region *mem); +int intel_memory_regions_hw_probe(struct drm_i915_private *i915); +void intel_memory_regions_driver_release(struct drm_i915_private *i915); + #endif diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 1035d3d46fd8..43b68b5fc562 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -52,7 +52,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) return PCH_SPT; case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); + WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_KBP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); @@ -74,6 +75,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) WARN_ON(!IS_COFFEELAKE(dev_priv)); /* CometPoint is CNP Compatible */ return PCH_CNP; + case INTEL_PCH_CMP_V_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n"); + WARN_ON(!IS_COFFEELAKE(dev_priv)); + /* Comet Lake V PCH is based on KBP, which is SPT compatible */ + return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Ice Lake 
PCH\n"); WARN_ON(!IS_ICELAKE(dev_priv)); @@ -83,6 +89,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; case INTEL_PCH_TGP_DEVICE_ID_TYPE: + case INTEL_PCH_TGP2_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n"); WARN_ON(!IS_TIGERLAKE(dev_priv)); return PCH_TGP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index f4dc18c34291..3053d1ce398b 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -43,9 +43,11 @@ enum intel_pch { #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 +#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 +#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 #define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 08255a96e1ea..31ec82337e4f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -107,6 +107,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); + + /* + * Lower the display internal timeout. + * This is needed to avoid any hard hangs when DSI port PLL + * is off and a MMIO access is attempted by any privilege + * application, using batch buffers or any other means. + */ + I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -197,8 +205,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) break; } - dev_priv->ips.r_t = dev_priv->mem_freq; - switch (csipll & 0x3ff) { case 0x00c: dev_priv->fsb_freq = 3200; @@ -227,14 +233,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->fsb_freq = 0; break; } - - if (dev_priv->fsb_freq == 3200) { - dev_priv->ips.c_m = 0; - } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { - dev_priv->ips.c_m = 1; - } else { - dev_priv->ips.c_m = 2; - } } static const struct cxsr_latency cxsr_latency_table[] = { @@ -465,7 +463,7 @@ static const int pessimal_latency_ns = 5000; static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; enum pipe pipe = crtc->pipe; @@ -796,10 +794,10 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv) static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); /* FIXME check the 'enable' instead */ - if (!crtc_state->base.active) + if (!crtc_state->hw.active) return false; /* @@ -811,9 +809,28 @@ static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, * around this problem with the watermark code. 
*/ if (plane->id == PLANE_CURSOR) - return plane_state->base.fb != NULL; + return plane_state->hw.fb != NULL; else - return plane_state->base.visible; + return plane_state->uapi.visible; +} + +static bool intel_crtc_active(struct intel_crtc *crtc) +{ + /* Be paranoid as we can arrive here with only partial + * state retrieved from the hardware during setup. + * + * We can ditch the adjusted_mode.crtc_clock check as soon + * as Haswell has gained clock readout/fastboot support. + * + * We can ditch the crtc->primary->state->fb check as soon as we can + * properly reconstruct framebuffers. + * + * FIXME: The intel_crtc->active here should be switched to + * crtc->state->active once we have proper CRTC states wired up + * for atomic. + */ + return crtc->active && crtc->base.primary->state->fb && + crtc->config->hw.adjusted_mode.crtc_clock; } static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) @@ -852,7 +869,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) crtc = single_enabled_crtc(dev_priv); if (crtc) { const struct drm_display_mode *adjusted_mode = - &crtc->config->base.adjusted_mode; + &crtc->config->hw.adjusted_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp = fb->format->cpp[0]; @@ -1085,10 +1102,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int level) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; unsigned int latency = dev_priv->wm.pri_latency[level] * 10; unsigned int clock, htotal, cpp, width, wm; @@ -1098,7 +1115,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = plane_state->base.fb->format->cpp[0]; + cpp = plane_state->hw.fb->format->cpp[0]; /* * Not 100% sure which way ELK should go here as the @@ -1118,7 +1135,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; - width = drm_rect_width(&plane_state->base.dst); + width = drm_rect_width(&plane_state->uapi.dst); if (plane->id == PLANE_CURSOR) { wm = intel_wm_method2(clock, htotal, width, cpp, latency); @@ -1145,7 +1162,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); bool dirty = false; for (; level < intel_wm_num_levels(dev_priv); level++) { @@ -1161,7 +1178,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, int level, u16 value) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); bool dirty = false; /* NORMAL level doesn't have an FBC watermark */ @@ -1184,7 +1201,7 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state 
*plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); enum plane_id plane_id = plane->id; bool dirty = false; @@ -1263,7 +1280,7 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (level > dev_priv->wm.max_level) return false; @@ -1301,9 +1318,9 @@ static void g4x_invalidate_wms(struct intel_crtc *crtc, static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->base.state); + to_intel_atomic_state(crtc_state->uapi.state); struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; int num_active_planes = hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); @@ -1318,8 +1335,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { - if (new_plane_state->base.crtc != &crtc->base && - old_plane_state->base.crtc != &crtc->base) + if (new_plane_state->hw.crtc != &crtc->base && + old_plane_state->hw.crtc != &crtc->base) continue; if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) @@ -1390,17 +1407,17 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; struct intel_atomic_state *intel_state = - to_intel_atomic_state(new_crtc_state->base.state); + to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(intel_state, crtc); const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; enum plane_id plane_id; - if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) { + if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { *intermediate = *optimal; intermediate->cxsr = false; @@ -1530,10 +1547,11 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv) } static void g4x_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); mutex_lock(&dev_priv->wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; @@ -1542,10 +1560,11 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state, } static void g4x_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc 
*crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; @@ -1591,10 +1610,10 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int level) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; unsigned int clock, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) @@ -1603,7 +1622,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = plane_state->base.fb->format->cpp[0]; + cpp = plane_state->hw.fb->format->cpp[0]; clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; width = crtc_state->pipe_src_w; @@ -1632,7 +1651,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; @@ -1744,7 +1763,7 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, int level, enum plane_id plane_id, u16 value) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int num_levels = intel_wm_num_levels(dev_priv); bool dirty = false; @@ -1761,7 +1780,7 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); enum plane_id plane_id = plane->id; int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); int level; @@ -1819,16 +1838,16 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->base.state); + to_intel_atomic_state(crtc_state->uapi.state); struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; int num_active_planes = hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); - bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base); + bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi); const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; 
struct intel_plane *plane; @@ -1839,8 +1858,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { - if (new_plane_state->base.crtc != &crtc->base && - old_plane_state->base.crtc != &crtc->base) + if (new_plane_state->hw.crtc != &crtc->base && + old_plane_state->hw.crtc != &crtc->base) continue; if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) @@ -1925,11 +1944,12 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) static void vlv_atomic_update_fifo(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_uncore *uncore = &dev_priv->uncore; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; int sprite0_start, sprite1_start, fifo_size; @@ -2023,17 +2043,17 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; struct intel_atomic_state *intel_state = - to_intel_atomic_state(new_crtc_state->base.state); + to_intel_atomic_state(new_crtc_state->uapi.state); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(intel_state, crtc); const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; int level; - if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) { + if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { *intermediate = *optimal; intermediate->cxsr = false; @@ -2149,10 +2169,11 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv) } static void vlv_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); mutex_lock(&dev_priv->wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; @@ -2161,10 +2182,11 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state, } static void vlv_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; @@ -2189,7 +2211,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) /* self-refresh has much higher latency */ static const int sr_latency_ns = 12000; 
const struct drm_display_mode *adjusted_mode = - &crtc->config->base.adjusted_mode; + &crtc->config->hw.adjusted_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int clock = adjusted_mode->crtc_clock; @@ -2270,7 +2292,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode = - &crtc->config->base.adjusted_mode; + &crtc->config->hw.adjusted_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2297,7 +2319,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode = - &crtc->config->base.adjusted_mode; + &crtc->config->hw.adjusted_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2345,7 +2367,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; const struct drm_display_mode *adjusted_mode = - &enabled->config->base.adjusted_mode; + &enabled->config->hw.adjusted_mode; const struct drm_framebuffer *fb = enabled->base.primary->state->fb; int clock = adjusted_mode->crtc_clock; @@ -2403,7 +2425,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) if (crtc == NULL) return; - adjusted_mode = &crtc->config->base.adjusted_mode; + adjusted_mode = &crtc->config->hw.adjusted_mode; planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, &i845_wm_info, dev_priv->display.get_fifo_size(dev_priv, PLANE_A), @@ -2485,7 +2507,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = plane_state->base.fb->format->cpp[0]; + cpp = plane_state->hw.fb->format->cpp[0]; method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); @@ -2493,8 +2515,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, return method1; method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->base.adjusted_mode.crtc_htotal, - drm_rect_width(&plane_state->base.dst), + crtc_state->hw.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.dst), cpp, mem_value); return min(method1, method2); @@ -2517,12 +2539,12 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = plane_state->base.fb->format->cpp[0]; + cpp = plane_state->hw.fb->format->cpp[0]; method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->base.adjusted_mode.crtc_htotal, - drm_rect_width(&plane_state->base.dst), + crtc_state->hw.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.dst), cpp, mem_value); return min(method1, method2); } @@ -2543,11 +2565,11 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = plane_state->base.fb->format->cpp[0]; + cpp = plane_state->hw.fb->format->cpp[0]; return ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->base.adjusted_mode.crtc_htotal, - drm_rect_width(&plane_state->base.dst), + crtc_state->hw.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.dst), cpp, mem_value); } @@ -2561,9 +2583,10 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, if 
(!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = plane_state->base.fb->format->cpp[0]; + cpp = plane_state->hw.fb->format->cpp[0]; - return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp); + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst), + cpp); } static unsigned int @@ -2768,12 +2791,12 @@ static u32 hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state) { const struct intel_atomic_state *intel_state = - to_intel_atomic_state(crtc_state->base.state); + to_intel_atomic_state(crtc_state->uapi.state); const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; u32 linetime, ips_linetime; - if (!crtc_state->base.active) + if (!crtc_state->hw.active) return 0; if (WARN_ON(adjusted_mode->crtc_clock == 0)) return 0; @@ -3083,11 +3106,9 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, /* Compute new watermarks for the pipe */ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = crtc_state->base.state; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_pipe_wm *pipe_wm; - struct drm_device *dev = state->dev; - const struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane; const struct intel_plane_state *plane_state; const struct intel_plane_state *pristate = NULL; @@ -3107,12 +3128,12 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) curstate = plane_state; } - pipe_wm->pipe_enabled = crtc_state->base.active; + pipe_wm->pipe_enabled = crtc_state->hw.active; if (sprstate) { - pipe_wm->sprites_enabled = sprstate->base.visible; - pipe_wm->sprites_scaled = sprstate->base.visible && - (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 || - drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16); + pipe_wm->sprites_enabled = sprstate->uapi.visible; + pipe_wm->sprites_scaled = sprstate->uapi.visible && + (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 || + drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16); } usable_level = max_level; @@ -3164,11 +3185,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) */ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) { - struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; struct intel_atomic_state *intel_state = - to_intel_atomic_state(newstate->base.state); + to_intel_atomic_state(newstate->uapi.state); const struct intel_crtc_state *oldstate = intel_atomic_get_old_crtc_state(intel_state, intel_crtc); const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; @@ -3180,7 +3201,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) * and after the vblank. 
*/ *a = newstate->wm.ilk.optimal; - if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) || + if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) || intel_state->skip_intermediate_wm) return 0; @@ -3590,10 +3611,8 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, dev_priv->wm.hw = *results; } -bool ilk_disable_lp_wm(struct drm_device *dev) +bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } @@ -3782,7 +3801,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); - if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) return false; for_each_intel_plane_on_crtc(dev, crtc, plane) { @@ -3832,7 +3851,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) < 11) return ddb_size - 4; /* 4 blocks for bypass path allocation */ - adjusted_mode = &crtc_state->base.adjusted_mode; + adjusted_mode = &crtc_state->hw.adjusted_mode; total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode); /* @@ -3861,16 +3880,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, struct skl_ddb_entry *alloc, /* out */ int *num_active /* out */) { - struct drm_atomic_state *state = crtc_state->base.state; + struct drm_atomic_state *state = crtc_state->uapi.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *for_crtc = crtc_state->base.crtc; + struct drm_crtc *for_crtc = crtc_state->uapi.crtc; const struct intel_crtc *crtc; u32 pipe_width = 0, total_width = 0, width_before_pipe = 0; enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; u16 ddb_size; u32 i; - if (WARN_ON(!state) || !crtc_state->base.active) { + if (WARN_ON(!state) || !crtc_state->hw.active) { alloc->start = 0; alloc->end = 0; *num_active = hweight8(dev_priv->active_pipes); @@ -3909,11 +3928,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, */ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; + &crtc_state->hw.adjusted_mode; enum pipe pipe = crtc->pipe; int hdisplay, vdisplay; - if (!crtc_state->base.enable) + if (!crtc_state->hw.enable) continue; drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); @@ -3944,7 +3963,7 @@ static unsigned int skl_cursor_allocation(const struct intel_crtc_state *crtc_state, int num_active) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level wm = {}; int ret, min_ddb_alloc = 0; @@ -4084,10 +4103,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, * * n.b., src is 16.16 fixed point, dst is whole integer. 
*/ - src_w = drm_rect_width(&plane_state->base.src) >> 16; - src_h = drm_rect_height(&plane_state->base.src) >> 16; - dst_w = drm_rect_width(&plane_state->base.dst); - dst_h = drm_rect_height(&plane_state->base.dst); + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + dst_w = drm_rect_width(&plane_state->uapi.dst); + dst_h = drm_rect_height(&plane_state->uapi.dst); fp_w_ratio = div_fixed16(src_w, dst_w); fp_h_ratio = div_fixed16(src_h, dst_h); @@ -4097,113 +4116,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, return mul_fixed16(downscale_w, downscale_h); } -static uint_fixed_16_16_t -skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) -{ - uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1); - - if (!crtc_state->base.enable) - return pipe_downscale; - - if (crtc_state->pch_pfit.enabled) { - u32 src_w, src_h, dst_w, dst_h; - u32 pfit_size = crtc_state->pch_pfit.size; - uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; - uint_fixed_16_16_t downscale_h, downscale_w; - - src_w = crtc_state->pipe_src_w; - src_h = crtc_state->pipe_src_h; - dst_w = pfit_size >> 16; - dst_h = pfit_size & 0xffff; - - if (!dst_w || !dst_h) - return pipe_downscale; - - fp_w_ratio = div_fixed16(src_w, dst_w); - fp_h_ratio = div_fixed16(src_h, dst_h); - downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); - downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); - - pipe_downscale = mul_fixed16(downscale_w, downscale_h); - } - - return pipe_downscale; -} - -int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct drm_atomic_state *state = crtc_state->base.state; - const struct intel_plane_state *plane_state; - struct intel_plane *plane; - int crtc_clock, dotclk; - u32 pipe_max_pixel_rate; - uint_fixed_16_16_t pipe_downscale; - uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); - - if (!crtc_state->base.enable) - return 0; - - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { - uint_fixed_16_16_t plane_downscale; - uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8); - int bpp; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - continue; - - if (WARN_ON(!plane_state->base.fb)) - return -EINVAL; - - plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state); - bpp = plane_state->base.fb->format->cpp[0] * 8; - if (bpp == 64) - plane_downscale = mul_fixed16(plane_downscale, - fp_9_div_8); - - max_downscale = max_fixed16(plane_downscale, max_downscale); - } - pipe_downscale = skl_pipe_downscale_amount(crtc_state); - - pipe_downscale = mul_fixed16(pipe_downscale, max_downscale); - - crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; - dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk; - - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) - dotclk *= 2; - - pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale); - - if (pipe_max_pixel_rate < crtc_clock) { - DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n"); - return -EINVAL; - } - - return 0; -} - static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); - const struct drm_framebuffer *fb = plane_state->base.fb; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 
+ const struct drm_framebuffer *fb = plane_state->hw.fb; u32 data_rate; u32 width = 0, height = 0; uint_fixed_16_16_t down_scale_amount; u64 rate; - if (!plane_state->base.visible) + if (!plane_state->uapi.visible) return 0; if (plane->id == PLANE_CURSOR) return 0; if (color_plane == 1 && - !drm_format_info_is_yuv_semiplanar(fb->format)) + !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) return 0; /* @@ -4211,8 +4143,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - width = drm_rect_width(&plane_state->base.src) >> 16; - height = drm_rect_height(&plane_state->base.src) >> 16; + width = drm_rect_width(&plane_state->uapi.src) >> 16; + height = drm_rect_height(&plane_state->uapi.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ if (color_plane == 1) { @@ -4235,7 +4167,7 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate, u64 *uv_plane_data_rate) { - struct drm_atomic_state *state = crtc_state->base.state; + struct drm_atomic_state *state = crtc_state->uapi.state; struct intel_plane *plane; const struct intel_plane_state *plane_state; u64 total_data_rate = 0; @@ -4270,7 +4202,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state; u64 total_data_rate = 0; - if (WARN_ON(!crtc_state->base.state)) + if (WARN_ON(!crtc_state->uapi.state)) return 0; /* Calculate and cache data rate for each plane */ @@ -4314,8 +4246,8 @@ static int skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, struct skl_ddb_allocation *ddb /* out */) { - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_crtc *crtc = crtc_state->base.crtc; + struct drm_atomic_state *state = crtc_state->uapi.state; + struct drm_crtc *crtc = crtc_state->uapi.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; @@ -4337,7 +4269,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, if (WARN_ON(!state)) return 0; - if (!crtc_state->base.active) { + if (!crtc_state->hw.active) { alloc->start = alloc->end = 0; return 0; } @@ -4380,8 +4312,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { - if (WARN_ON(wm->wm[level].min_ddb_alloc > - total[PLANE_CURSOR])) { + if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) { + WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX); blocks = U32_MAX; break; } @@ -4579,7 +4511,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state) u32 crtc_htotal; uint_fixed_16_16_t linetime_us; - if (!crtc_state->base.active) + if (!crtc_state->hw.active) return u32_to_fixed16(0); pixel_rate = crtc_state->pixel_rate; @@ -4587,7 +4519,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state) if (WARN_ON(pixel_rate == 0)) return u32_to_fixed16(0); - crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal; + crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal; linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); return linetime_us; @@ -4622,12 +4554,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = 
to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 interm_pbpl; /* only planar format has two planes */ - if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) { + if (color_plane == 1 && + !intel_format_info_is_yuv_semiplanar(format, modifier)) { DRM_DEBUG_KMS("Non planar format have single plane\n"); return -EINVAL; } @@ -4639,7 +4572,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = drm_format_info_is_yuv_semiplanar(format); + wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); wp->width = width; if (color_plane == 1 && wp->is_planar) @@ -4711,7 +4644,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, struct skl_wm_params *wp, int color_plane) { - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; int width; /* @@ -4719,11 +4652,11 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - width = drm_rect_width(&plane_state->base.src) >> 16; + width = drm_rect_width(&plane_state->uapi.src) >> 16; return skl_compute_wm_params(crtc_state, width, fb->format, fb->modifier, - plane_state->base.rotation, + plane_state->hw.rotation, skl_adjusted_plane_pixel_rate(crtc_state, plane_state), wp, color_plane); } @@ -4743,7 +4676,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; @@ -4769,14 +4702,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, - crtc_state->base.adjusted_mode.crtc_htotal, + crtc_state->hw.adjusted_mode.crtc_htotal, latency, wp->plane_blocks_per_line); if (wp->y_tiled) { selected_result = max_fixed16(method2, wp->y_tile_minimum); } else { - if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal / + if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal / wp->dbuf_block_size < 1) && (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; @@ -4867,7 +4800,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level *result_prev = &levels[0]; @@ -4884,7 +4817,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, static u32 skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = crtc_state->base.state; + struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_i915_private *dev_priv = to_i915(state->dev); 
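
intel_get_linetime_us() in the hunk above computes how long the display spends on one scanline: crtc_htotal divided by the pixel rate, which is in kHz, hence the * 1000. A hedged standalone sketch of the same relation, with made-up 1080p-ish numbers rather than values from the patch:

#include <stdint.h>

/* Line time in nanoseconds: htotal / pixel_rate. With crtc_htotal = 2200
 * and pixel_rate = 148500 kHz (illustrative only), one scanline takes
 * 2200 * 1000000 / 148500 ~= 14814 ns, i.e. ~14.8 us. */
static uint64_t linetime_ns(uint32_t crtc_htotal, uint32_t pixel_rate_khz)
{
	return ((uint64_t)crtc_htotal * 1000000) / pixel_rate_khz;
}
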
uint_fixed_16_16_t linetime_us; u32 linetime_wm; @@ -4903,7 +4836,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wp, struct skl_plane_wm *wm) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_device *dev = crtc_state->uapi.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); u16 trans_min, trans_y_tile_min; const u16 trans_amount = 10; /* This is configurable amount */ @@ -5001,8 +4934,8 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); - const struct drm_framebuffer *fb = plane_state->base.fb; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id plane_id = plane->id; int ret; @@ -5027,7 +4960,7 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id; + enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id; int ret; /* Watermarks calculated in master */ @@ -5035,7 +4968,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; if (plane_state->planar_linked_plane) { - const struct drm_framebuffer *fb = plane_state->base.fb; + const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id y_plane_id = plane_state->planar_linked_plane->id; WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)); @@ -5063,7 +4996,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; struct intel_plane *plane; const struct intel_plane_state *plane_state; @@ -5240,8 +5173,8 @@ static int skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { - struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state); - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_plane *plane; @@ -5445,7 +5378,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state) if (ret) return ret; - state->active_pipe_changes = ~0; + state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask; /* * We usually only initialize state->active_pipes if we @@ -5471,7 +5404,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state) * to grab the lock on *all* CRTC's. */ if (state->active_pipe_changes || state->modeset) { - state->wm_results.dirty_pipes = ~0; + state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask; ret = intel_add_all_pipes(state); if (ret) @@ -5525,7 +5458,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, * power well the hardware state will go out of sync * with the software state. 
*/ - if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) && + if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && skl_plane_wm_equals(dev_priv, &old_crtc_state->wm.skl.optimal.planes[plane_id], &new_crtc_state->wm.skl.optimal.planes[plane_id])) @@ -5589,11 +5522,12 @@ skl_compute_wm(struct intel_atomic_state *state) } static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; enum pipe pipe = crtc->pipe; if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0) @@ -5603,10 +5537,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, } static void skl_initial_wm(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct skl_ddb_values *results = &state->wm_results; if ((results->dirty_pipes & BIT(crtc->pipe)) == 0) @@ -5614,8 +5549,8 @@ static void skl_initial_wm(struct intel_atomic_state *state, mutex_lock(&dev_priv->wm.wm_mutex); - if (crtc_state->base.active_changed) - skl_atomic_update_crtc_wm(state, crtc_state); + if (crtc_state->uapi.active_changed) + skl_atomic_update_crtc_wm(state, crtc); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5671,10 +5606,11 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) } static void ilk_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); mutex_lock(&dev_priv->wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; @@ -5683,10 +5619,11 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state, } static void ilk_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); if (!crtc_state->wm.need_postvbl_update) return; @@ -6027,7 +5964,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) enum plane_id plane_id = plane->id; int level; - if (plane_state->base.visible) + if (plane_state->uapi.visible) continue; for (level = 0; level < 3; level++) { @@ -6182,7 +6119,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) enum plane_id plane_id = plane->id; int level; - if (plane_state->base.visible) + if (plane_state->uapi.visible) continue; for (level = 0; level < wm_state->num_levels; level++) 
{ @@ -6339,1627 +6276,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv) intel_enable_ipc(dev_priv); } -/* - * Lock protecting IPS related data structures - */ -DEFINE_SPINLOCK(mchdev_lock); - -bool ironlake_set_drps(struct drm_i915_private *i915, u8 val) -{ - struct intel_uncore *uncore = &i915->uncore; - u16 rgvswctl; - - lockdep_assert_held(&mchdev_lock); - - rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); - if (rgvswctl & MEMCTL_CMD_STS) { - DRM_DEBUG("gpu busy, RCS change rejected\n"); - return false; /* still busy with another command */ - } - - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); - intel_uncore_posting_read16(uncore, MEMSWCTL); - - rgvswctl |= MEMCTL_CMD_STS; - intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); - - return true; -} - -static void ironlake_enable_drps(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 rgvmodectl; - u8 fmax, fmin, fstart, vstart; - - spin_lock_irq(&mchdev_lock); - - rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); - - /* Enable temp reporting */ - intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN); - intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE); - - /* 100ms RC evaluation intervals */ - intel_uncore_write(uncore, RCUPEI, 100000); - intel_uncore_write(uncore, RCDNEI, 100000); - - /* Set max/min thresholds to 90ms and 80ms respectively */ - intel_uncore_write(uncore, RCBMAXAVG, 90000); - intel_uncore_write(uncore, RCBMINAVG, 80000); - - intel_uncore_write(uncore, MEMIHYST, 1); - - /* Set up min, max, and cur for interrupt handling */ - fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; - fmin = (rgvmodectl & MEMMODE_FMIN_MASK); - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; - - vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & - PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; - - dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ - dev_priv->ips.fstart = fstart; - - dev_priv->ips.max_delay = fstart; - dev_priv->ips.min_delay = fmin; - dev_priv->ips.cur_delay = fstart; - - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", - fmax, fmin, fstart); - - intel_uncore_write(uncore, - MEMINTREN, - MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); - - /* - * Interrupts will be enabled in ironlake_irq_postinstall - */ - - intel_uncore_write(uncore, VIDSTART, vstart); - intel_uncore_posting_read(uncore, VIDSTART); - - rgvmodectl |= MEMMODE_SWMODE_EN; - intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); - - if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & - MEMCTL_CMD_STS) == 0, 10)) - DRM_ERROR("stuck trying to change perf mode\n"); - mdelay(1); - - ironlake_set_drps(dev_priv, fstart); - - dev_priv->ips.last_count1 = - intel_uncore_read(uncore, DMIEC) + - intel_uncore_read(uncore, DDREC) + - intel_uncore_read(uncore, CSIEC); - dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); - dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC); - dev_priv->ips.last_time2 = ktime_get_raw_ns(); - - spin_unlock_irq(&mchdev_lock); -} - -static void ironlake_disable_drps(struct drm_i915_private *i915) -{ - struct intel_uncore *uncore = &i915->uncore; - u16 rgvswctl; - - spin_lock_irq(&mchdev_lock); - - rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); - - /* Ack interrupts, disable EFC interrupt */ - intel_uncore_write(uncore, - MEMINTREN, - intel_uncore_read(uncore, MEMINTREN) & - ~MEMINT_EVAL_CHG_EN); - 
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); - intel_uncore_write(uncore, - DEIER, - intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); - intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); - intel_uncore_write(uncore, - DEIMR, - intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); - - /* Go back to the starting frequency */ - ironlake_set_drps(i915, i915->ips.fstart); - mdelay(1); - rgvswctl |= MEMCTL_CMD_STS; - intel_uncore_write(uncore, MEMSWCTL, rgvswctl); - mdelay(1); - - spin_unlock_irq(&mchdev_lock); -} - -/* There's a funny hw issue where the hw returns all 0 when reading from - * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value - * ourselves, instead of doing a rmw cycle (which might result in us clearing - * all limits and the gpu stuck at whatever frequency it is at atm). - */ -static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 limits; - - /* Only set the down limit when we've reached the lowest level to avoid - * getting more interrupts, otherwise leave this clear. This prevents a - * race in the hw when coming out of rc6: There's a tiny window where - * the hw runs at the minimal clock before selecting the desired - * frequency, if the down threshold expires in that window we will not - * receive a down interrupt. */ - if (INTEL_GEN(dev_priv) >= 9) { - limits = (rps->max_freq_softlimit) << 23; - if (val <= rps->min_freq_softlimit) - limits |= (rps->min_freq_softlimit) << 14; - } else { - limits = rps->max_freq_softlimit << 24; - if (val <= rps->min_freq_softlimit) - limits |= rps->min_freq_softlimit << 16; - } - - return limits; -} - -static void rps_set_power(struct drm_i915_private *dev_priv, int new_power) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 threshold_up = 0, threshold_down = 0; /* in % */ - u32 ei_up = 0, ei_down = 0; - - lockdep_assert_held(&rps->power.mutex); - - if (new_power == rps->power.mode) - return; - - /* Note the units here are not exactly 1us, but 1280ns. */ - switch (new_power) { - case LOW_POWER: - /* Upclock if more than 95% busy over 16ms */ - ei_up = 16000; - threshold_up = 95; - - /* Downclock if less than 85% busy over 32ms */ - ei_down = 32000; - threshold_down = 85; - break; - - case BETWEEN: - /* Upclock if more than 90% busy over 13ms */ - ei_up = 13000; - threshold_up = 90; - - /* Downclock if less than 75% busy over 32ms */ - ei_down = 32000; - threshold_down = 75; - break; - - case HIGH_POWER: - /* Upclock if more than 85% busy over 10ms */ - ei_up = 10000; - threshold_up = 85; - - /* Downclock if less than 60% busy over 32ms */ - ei_down = 32000; - threshold_down = 60; - break; - } - - /* When byt can survive without system hang with dynamic - * sw freq adjustments, this restriction can be lifted. - */ - if (IS_VALLEYVIEW(dev_priv)) - goto skip_hw_write; - - I915_WRITE(GEN6_RP_UP_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_up)); - I915_WRITE(GEN6_RP_UP_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, - ei_up * threshold_up / 100)); - - I915_WRITE(GEN6_RP_DOWN_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_down)); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, - ei_down * threshold_down / 100)); - - I915_WRITE(GEN6_RP_CONTROL, - (INTEL_GEN(dev_priv) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - -skip_hw_write: - rps->power.mode = new_power; - rps->power.up_threshold = threshold_up; - rps->power.down_threshold = threshold_down; -} - -static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - int new_power; - - new_power = rps->power.mode; - switch (rps->power.mode) { - case LOW_POWER: - if (val > rps->efficient_freq + 1 && - val > rps->cur_freq) - new_power = BETWEEN; - break; - - case BETWEEN: - if (val <= rps->efficient_freq && - val < rps->cur_freq) - new_power = LOW_POWER; - else if (val >= rps->rp0_freq && - val > rps->cur_freq) - new_power = HIGH_POWER; - break; - - case HIGH_POWER: - if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && - val < rps->cur_freq) - new_power = BETWEEN; - break; - } - /* Max/min bins are special */ - if (val <= rps->min_freq_softlimit) - new_power = LOW_POWER; - if (val >= rps->max_freq_softlimit) - new_power = HIGH_POWER; - - mutex_lock(&rps->power.mutex); - if (rps->power.interactive) - new_power = HIGH_POWER; - rps_set_power(dev_priv, new_power); - mutex_unlock(&rps->power.mutex); -} - -void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive) -{ - struct intel_rps *rps = &i915->gt_pm.rps; - - if (INTEL_GEN(i915) < 6) - return; - - mutex_lock(&rps->power.mutex); - if (interactive) { - if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake)) - rps_set_power(i915, HIGH_POWER); - } else { - GEM_BUG_ON(!rps->power.interactive); - rps->power.interactive--; - } - mutex_unlock(&rps->power.mutex); -} - -static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 mask = 0; - - /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */ - if (val > rps->min_freq_softlimit) - mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; - if (val < rps->max_freq_softlimit) - mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; - - mask &= dev_priv->pm_rps_events; - - return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); -} - -/* gen6_set_rps is called to update the frequency request, but should also be - * called when the range (min_delay and max_delay) is modified so that we can - * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ -static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* min/max delay may still have been modified so be sure to - * write the limits value. - */ - if (val != rps->cur_freq) { - gen6_set_rps_thresholds(dev_priv, val); - - if (INTEL_GEN(dev_priv) >= 9) - I915_WRITE(GEN6_RPNSWREQ, - GEN9_FREQUENCY(val)); - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - I915_WRITE(GEN6_RPNSWREQ, - HSW_FREQUENCY(val)); - else - I915_WRITE(GEN6_RPNSWREQ, - GEN6_FREQUENCY(val) | - GEN6_OFFSET(0) | - GEN6_AGGRESSIVE_TURBO); - } - - /* Make sure we continue to get interrupts - * until we hit the minimum or maximum frequencies. 
- */ - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); - I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); - - rps->cur_freq = val; - trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); - - return 0; -} - -static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) -{ - int err; - - if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), - "Odd GPU freq value\n")) - val &= ~1; - - I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); - - if (val != dev_priv->gt_pm.rps.cur_freq) { - vlv_punit_get(dev_priv); - err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); - vlv_punit_put(dev_priv); - if (err) - return err; - - gen6_set_rps_thresholds(dev_priv, val); - } - - dev_priv->gt_pm.rps.cur_freq = val; - trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); - - return 0; -} - -/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down - * - * * If Gfx is Idle, then - * 1. Forcewake Media well. - * 2. Request idle freq. - * 3. Release Forcewake of Media well. -*/ -static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 val = rps->idle_freq; - int err; - - if (rps->cur_freq <= val) - return; - - /* The punit delays the write of the frequency and voltage until it - * determines the GPU is awake. During normal usage we don't want to - * waste power changing the frequency if the GPU is sleeping (rc6). - * However, the GPU and driver is now idle and we do not want to delay - * switching to minimum voltage (reducing power whilst idle) as we do - * not expect to be woken in the near future and so must flush the - * change by waking the device. - * - * We choose to take the media powerwell (either would do to trick the - * punit into committing the voltage change) as that takes a lot less - * power than the render powerwell. - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA); - err = valleyview_set_rps(dev_priv, val); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA); - - if (err) - DRM_ERROR("Failed to set RPS for idle\n"); -} - -void gen6_rps_busy(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - mutex_lock(&rps->lock); - if (rps->enabled) { - u8 freq; - - if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) - gen6_rps_reset_ei(dev_priv); - I915_WRITE(GEN6_PMINTRMSK, - gen6_rps_pm_mask(dev_priv, rps->cur_freq)); - - gen6_enable_rps_interrupts(dev_priv); - - /* Use the user's desired frequency as a guide, but for better - * performance, jump directly to RPe as our starting frequency. - */ - freq = max(rps->cur_freq, - rps->efficient_freq); - - if (intel_set_rps(dev_priv, - clamp(freq, - rps->min_freq_softlimit, - rps->max_freq_softlimit))) - DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); - } - mutex_unlock(&rps->lock); -} - -void gen6_rps_idle(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* Flush our bottom-half so that it does not race with us - * setting the idle frequency and so that it is bounded by - * our rpm wakeref. And then disable the interrupts to stop any - * futher RPS reclocking whilst we are asleep. 
- */ - gen6_disable_rps_interrupts(dev_priv); - - mutex_lock(&rps->lock); - if (rps->enabled) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_set_rps_idle(dev_priv); - else - gen6_set_rps(dev_priv, rps->idle_freq); - rps->last_adj = 0; - I915_WRITE(GEN6_PMINTRMSK, - gen6_sanitize_rps_pm_mask(dev_priv, ~0)); - } - mutex_unlock(&rps->lock); -} - -void gen6_rps_boost(struct i915_request *rq) -{ - struct intel_rps *rps = &rq->i915->gt_pm.rps; - unsigned long flags; - bool boost; - - /* This is intentionally racy! We peek at the state here, then - * validate inside the RPS worker. - */ - if (!rps->enabled) - return; - - if (i915_request_signaled(rq)) - return; - - /* Serializes with i915_request_retire() */ - boost = false; - spin_lock_irqsave(&rq->lock, flags); - if (!i915_request_has_waitboost(rq) && - !dma_fence_is_signaled_locked(&rq->fence)) { - boost = !atomic_fetch_inc(&rps->num_waiters); - rq->flags |= I915_REQUEST_WAITBOOST; - } - spin_unlock_irqrestore(&rq->lock, flags); - if (!boost) - return; - - if (READ_ONCE(rps->cur_freq) < rps->boost_freq) - schedule_work(&rps->work); - - atomic_inc(&rps->boosts); -} - -int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - int err; - - lockdep_assert_held(&rps->lock); - GEM_BUG_ON(val > rps->max_freq); - GEM_BUG_ON(val < rps->min_freq); - - if (!rps->enabled) { - rps->cur_freq = val; - return 0; - } - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - err = valleyview_set_rps(dev_priv, val); - else - err = gen6_set_rps(dev_priv, val); - - return err; -} - -static void gen9_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void gen6_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void cherryview_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void valleyview_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* All of these values are in units of 50MHz */ - - /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_GEN9_LP(dev_priv)) { - u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); - rps->rp0_freq = (rp_state_cap >> 16) & 0xff; - rps->rp1_freq = (rp_state_cap >> 8) & 0xff; - rps->min_freq = (rp_state_cap >> 0) & 0xff; - } else { - u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - rps->rp0_freq = (rp_state_cap >> 0) & 0xff; - rps->rp1_freq = (rp_state_cap >> 8) & 0xff; - rps->min_freq = (rp_state_cap >> 16) & 0xff; - } - /* hw_max = RP0 until we check for overclocking */ - rps->max_freq = rps->rp0_freq; - - rps->efficient_freq = rps->rp1_freq; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || - IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - u32 ddcc_status = 0; - - if (sandybridge_pcode_read(dev_priv, - HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, - &ddcc_status, NULL) == 0) - rps->efficient_freq = - clamp_t(u8, - ((ddcc_status >> 8) & 0xff), - rps->min_freq, - rps->max_freq); - } - - if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - /* Store the frequency values in 16.66 MHZ units, which is - * the natural hardware unit for SKL - */ - rps->rp0_freq *= GEN9_FREQ_SCALER; - rps->rp1_freq *= GEN9_FREQ_SCALER; - rps->min_freq *= GEN9_FREQ_SCALER; - rps->max_freq *= GEN9_FREQ_SCALER; - 
rps->efficient_freq *= GEN9_FREQ_SCALER; - } -} - -static void reset_rps(struct drm_i915_private *dev_priv, - int (*set)(struct drm_i915_private *, u8)) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u8 freq = rps->cur_freq; - - /* force a reset */ - rps->power.mode = -1; - rps->cur_freq = -1; - - if (set(dev_priv, freq)) - DRM_ERROR("Failed to reset RPS to initial values\n"); -} - -/* See the Gen9_GT_PM_Programming_Guide doc for the below */ -static void gen9_enable_rps(struct drm_i915_private *dev_priv) -{ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Program defaults and thresholds for RPS */ - if (IS_GEN(dev_priv, 9)) - I915_WRITE(GEN6_RC_VIDEO_FREQ, - GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq)); - - /* 1 second timeout*/ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, - GT_INTERVAL_FROM_US(dev_priv, 1000000)); - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); - - /* Leaning on the below call to gen6_set_rps to program/setup the - * Up/Down EI & threshold registers, as well as the RP_CONTROL, - * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ - reset_rps(dev_priv, gen6_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen8_enable_rps(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 1 Program defaults and thresholds for RPS*/ - I915_WRITE(GEN6_RPNSWREQ, - HSW_FREQUENCY(rps->rp1_freq)); - I915_WRITE(GEN6_RC_VIDEO_FREQ, - HSW_FREQUENCY(rps->rp1_freq)); - /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ - - /* Docs recommend 900MHz, and 300 MHz respectively */ - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - rps->max_freq_softlimit << 24 | - rps->min_freq_softlimit << 16); - - I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ - I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ - I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - /* 2: Enable RPS */ - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - - reset_rps(dev_priv, gen6_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen6_enable_rps(struct drm_i915_private *dev_priv) -{ - /* Here begins a magic sequence of register writes to enable - * auto-downclocking. - * - * Perhaps there might be some value in exposing these to - * userspace... 
- */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Power down if completely idle for over 50ms */ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - reset_rps(dev_priv, gen6_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp0; - - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - - switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) { - case 8: - /* (2 * 4) config */ - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); - break; - case 12: - /* (2 * 6) config */ - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); - break; - case 16: - /* (2 * 8) config */ - default: - /* Setting (2 * 8) Min RP0 for any other combination */ - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); - break; - } - - rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); - - return rp0; -} - -static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rpe; - - val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); - rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; - - return rpe; -} - -static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp1; - - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - rp1 = (val & FB_GFX_FREQ_FUSE_MASK); - - return rp1; -} - -static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rpn; - - val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE); - rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) & - FB_GFX_FREQ_FUSE_MASK); - - return rpn; -} - -static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp1; - - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); - - rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; - - return rp1; -} - -static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp0; - - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); - - rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; - /* Clamp to max */ - rp0 = min_t(u32, rp0, 0xea); - - return rp0; -} - -static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rpe; - - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); - rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); - rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; - - return rpe; -} - -static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; - /* - * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value - * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on - * a BYT-M B0 the above register contains 0xbf. Moreover when setting - * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 - * to make sure it matches what Punit accepts. 
- */ - return max_t(u32, val, 0xc0); -} - -static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) -{ - dev_priv->gt_pm.rps.gpll_ref_freq = - vlv_get_cck_clock(dev_priv, "GPLL ref", - CCK_GPLL_CLOCK_CONTROL, - dev_priv->czclk_freq); - - DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", - dev_priv->gt_pm.rps.gpll_ref_freq); -} - -static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 val; - - vlv_iosf_sb_get(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); - - vlv_init_gpll_ref_freq(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - switch ((val >> 6) & 3) { - case 0: - case 1: - dev_priv->mem_freq = 800; - break; - case 2: - dev_priv->mem_freq = 1066; - break; - case 3: - dev_priv->mem_freq = 1333; - break; - } - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); - - rps->max_freq = valleyview_rps_max_freq(dev_priv); - rps->rp0_freq = rps->max_freq; - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->max_freq), - rps->max_freq); - - rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv); - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->efficient_freq), - rps->efficient_freq); - - rps->rp1_freq = valleyview_rps_guar_freq(dev_priv); - DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->rp1_freq), - rps->rp1_freq); - - rps->min_freq = valleyview_rps_min_freq(dev_priv); - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->min_freq), - rps->min_freq); - - vlv_iosf_sb_put(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); -} - -static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 val; - - vlv_iosf_sb_get(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); - - vlv_init_gpll_ref_freq(dev_priv); - - val = vlv_cck_read(dev_priv, CCK_FUSE_REG); - - switch ((val >> 2) & 0x7) { - case 3: - dev_priv->mem_freq = 2000; - break; - default: - dev_priv->mem_freq = 1600; - break; - } - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); - - rps->max_freq = cherryview_rps_max_freq(dev_priv); - rps->rp0_freq = rps->max_freq; - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->max_freq), - rps->max_freq); - - rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv); - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->efficient_freq), - rps->efficient_freq); - - rps->rp1_freq = cherryview_rps_guar_freq(dev_priv); - DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->rp1_freq), - rps->rp1_freq); - - rps->min_freq = cherryview_rps_min_freq(dev_priv); - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->min_freq), - rps->min_freq); - - vlv_iosf_sb_put(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); - - WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq | - rps->min_freq) & 1, - "Odd GPU freq values\n"); -} - -static void cherryview_enable_rps(struct drm_i915_private *dev_priv) -{ - u32 val; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 1: Program defaults and thresholds for RPS*/ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); - 
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); - I915_WRITE(GEN6_RP_UP_EI, 66000); - I915_WRITE(GEN6_RP_DOWN_EI, 350000); - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - /* 2: Enable RPS */ - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - - /* Setting Fixed Bias */ - vlv_punit_get(dev_priv); - - val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; - vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - - vlv_punit_put(dev_priv); - - /* RPS code assumes GPLL is used */ - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); - - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - - reset_rps(dev_priv, valleyview_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void valleyview_enable_rps(struct drm_i915_private *dev_priv) -{ - u32 val; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); - I915_WRITE(GEN6_RP_UP_EI, 66000); - I915_WRITE(GEN6_RP_DOWN_EI, 350000); - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_CONT); - - vlv_punit_get(dev_priv); - - /* Setting Fixed Bias */ - val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; - vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - - vlv_punit_put(dev_priv); - - /* RPS code assumes GPLL is used */ - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); - - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - - reset_rps(dev_priv, valleyview_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static unsigned long intel_pxfreq(u32 vidfreq) -{ - unsigned long freq; - int div = (vidfreq & 0x3f0000) >> 16; - int post = (vidfreq & 0x3000) >> 12; - int pre = (vidfreq & 0x7); - - if (!pre) - return 0; - - freq = ((div * 133333) / ((1<<post) * pre)); - - return freq; -} - -static const struct cparams { - u16 i; - u16 t; - u16 m; - u16 c; -} cparams[] = { - { 1, 1333, 301, 28664 }, - { 1, 1066, 294, 24460 }, - { 1, 800, 294, 25192 }, - { 0, 1333, 276, 27605 }, - { 0, 1066, 276, 27605 }, - { 0, 800, 231, 23784 }, -}; - -static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) -{ - u64 total_count, diff, ret; - u32 count1, count2, count3, m = 0, c = 0; - unsigned long now = jiffies_to_msecs(jiffies), diff1; - int i; - - lockdep_assert_held(&mchdev_lock); - - diff1 = now - dev_priv->ips.last_time1; - - /* Prevent division-by-zero if we are asking too fast. - * Also, we don't get interesting results if we are polling - * faster than once in 10ms, so just return the saved value - * in such cases. 
- */ - if (diff1 <= 10) - return dev_priv->ips.chipset_power; - - count1 = I915_READ(DMIEC); - count2 = I915_READ(DDREC); - count3 = I915_READ(CSIEC); - - total_count = count1 + count2 + count3; - - /* FIXME: handle per-counter overflow */ - if (total_count < dev_priv->ips.last_count1) { - diff = ~0UL - dev_priv->ips.last_count1; - diff += total_count; - } else { - diff = total_count - dev_priv->ips.last_count1; - } - - for (i = 0; i < ARRAY_SIZE(cparams); i++) { - if (cparams[i].i == dev_priv->ips.c_m && - cparams[i].t == dev_priv->ips.r_t) { - m = cparams[i].m; - c = cparams[i].c; - break; - } - } - - diff = div_u64(diff, diff1); - ret = ((m * diff) + c); - ret = div_u64(ret, 10); - - dev_priv->ips.last_count1 = total_count; - dev_priv->ips.last_time1 = now; - - dev_priv->ips.chipset_power = ret; - - return ret; -} - -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - unsigned long val = 0; - - if (!IS_GEN(dev_priv, 5)) - return 0; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - val = __i915_chipset_val(dev_priv); - spin_unlock_irq(&mchdev_lock); - } - - return val; -} - -unsigned long i915_mch_val(struct drm_i915_private *i915) -{ - unsigned long m, x, b; - u32 tsfs; - - tsfs = intel_uncore_read(&i915->uncore, TSFS); - - m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); - x = intel_uncore_read8(&i915->uncore, TR1); - - b = tsfs & TSFS_INTR_MASK; - - return ((m * x) / 127) - b; -} - -static int _pxvid_to_vd(u8 pxvid) -{ - if (pxvid == 0) - return 0; - - if (pxvid >= 8 && pxvid < 31) - pxvid = 31; - - return (pxvid + 2) * 125; -} - -static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) -{ - const int vd = _pxvid_to_vd(pxvid); - const int vm = vd - 1125; - - if (INTEL_INFO(dev_priv)->is_mobile) - return vm > 0 ? vm : 0; - - return vd; -} - -static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) -{ - u64 now, diff, diffms; - u32 count; - - lockdep_assert_held(&mchdev_lock); - - now = ktime_get_raw_ns(); - diffms = now - dev_priv->ips.last_time2; - do_div(diffms, NSEC_PER_MSEC); - - /* Don't divide by 0 */ - if (!diffms) - return; - - count = I915_READ(GFXEC); - - if (count < dev_priv->ips.last_count2) { - diff = ~0UL - dev_priv->ips.last_count2; - diff += count; - } else { - diff = count - dev_priv->ips.last_count2; - } - - dev_priv->ips.last_count2 = count; - dev_priv->ips.last_time2 = now; - - /* More magic constants... 
*/ - diff = diff * 1181; - diff = div_u64(diff, diffms * 10); - dev_priv->ips.gfx_power = diff; -} - -void i915_update_gfx_val(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - - if (!IS_GEN(dev_priv, 5)) - return; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - __i915_update_gfx_val(dev_priv); - spin_unlock_irq(&mchdev_lock); - } -} - -static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) -{ - unsigned long t, corr, state1, corr2, state2; - u32 pxvid, ext_v; - - lockdep_assert_held(&mchdev_lock); - - pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq)); - pxvid = (pxvid >> 24) & 0x7f; - ext_v = pvid_to_extvid(dev_priv, pxvid); - - state1 = ext_v; - - t = i915_mch_val(dev_priv); - - /* Revel in the empirically derived constants */ - - /* Correction factor in 1/100000 units */ - if (t > 80) - corr = ((t * 2349) + 135940); - else if (t >= 50) - corr = ((t * 964) + 29317); - else /* < 50 */ - corr = ((t * 301) + 1004); - - corr = corr * ((150142 * state1) / 10000 - 78642); - corr /= 100000; - corr2 = (corr * dev_priv->ips.corr); - - state2 = (corr2 * state1) / 10000; - state2 /= 100; /* convert to mW */ - - __i915_update_gfx_val(dev_priv); - - return dev_priv->ips.gfx_power + state2; -} - -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - unsigned long val = 0; - - if (!IS_GEN(dev_priv, 5)) - return 0; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - val = __i915_gfx_val(dev_priv); - spin_unlock_irq(&mchdev_lock); - } - - return val; -} - -static struct drm_i915_private __rcu *i915_mch_dev; - -static struct drm_i915_private *mchdev_get(void) -{ - struct drm_i915_private *i915; - - rcu_read_lock(); - i915 = rcu_dereference(i915_mch_dev); - if (!kref_get_unless_zero(&i915->drm.ref)) - i915 = NULL; - rcu_read_unlock(); - - return i915; -} - -/** - * i915_read_mch_val - return value for IPS use - * - * Calculate and return a value for the IPS driver to use when deciding whether - * we have thermal and power headroom to increase CPU or GPU power budget. - */ -unsigned long i915_read_mch_val(void) -{ - struct drm_i915_private *i915; - unsigned long chipset_val = 0; - unsigned long graphics_val = 0; - intel_wakeref_t wakeref; - - i915 = mchdev_get(); - if (!i915) - return 0; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - chipset_val = __i915_chipset_val(i915); - graphics_val = __i915_gfx_val(i915); - spin_unlock_irq(&mchdev_lock); - } - - drm_dev_put(&i915->drm); - return chipset_val + graphics_val; -} -EXPORT_SYMBOL_GPL(i915_read_mch_val); - -/** - * i915_gpu_raise - raise GPU frequency limit - * - * Raise the limit; IPS indicates we have thermal headroom. - */ -bool i915_gpu_raise(void) -{ - struct drm_i915_private *i915; - - i915 = mchdev_get(); - if (!i915) - return false; - - spin_lock_irq(&mchdev_lock); - if (i915->ips.max_delay > i915->ips.fmax) - i915->ips.max_delay--; - spin_unlock_irq(&mchdev_lock); - - drm_dev_put(&i915->drm); - return true; -} -EXPORT_SYMBOL_GPL(i915_gpu_raise); - -/** - * i915_gpu_lower - lower GPU frequency limit - * - * IPS indicates we're close to a thermal limit, so throttle back the GPU - * frequency maximum. 
- */ -bool i915_gpu_lower(void) -{ - struct drm_i915_private *i915; - - i915 = mchdev_get(); - if (!i915) - return false; - - spin_lock_irq(&mchdev_lock); - if (i915->ips.max_delay < i915->ips.min_delay) - i915->ips.max_delay++; - spin_unlock_irq(&mchdev_lock); - - drm_dev_put(&i915->drm); - return true; -} -EXPORT_SYMBOL_GPL(i915_gpu_lower); - -/** - * i915_gpu_busy - indicate GPU business to IPS - * - * Tell the IPS driver whether or not the GPU is busy. - */ -bool i915_gpu_busy(void) -{ - struct drm_i915_private *i915; - bool ret; - - i915 = mchdev_get(); - if (!i915) - return false; - - ret = i915->gt.awake; - - drm_dev_put(&i915->drm); - return ret; -} -EXPORT_SYMBOL_GPL(i915_gpu_busy); - -/** - * i915_gpu_turbo_disable - disable graphics turbo - * - * Disable graphics turbo by resetting the max frequency and setting the - * current frequency to the default. - */ -bool i915_gpu_turbo_disable(void) -{ - struct drm_i915_private *i915; - bool ret; - - i915 = mchdev_get(); - if (!i915) - return false; - - spin_lock_irq(&mchdev_lock); - i915->ips.max_delay = i915->ips.fstart; - ret = ironlake_set_drps(i915, i915->ips.fstart); - spin_unlock_irq(&mchdev_lock); - - drm_dev_put(&i915->drm); - return ret; -} -EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); - -/** - * Tells the intel_ips driver that the i915 driver is now loaded, if - * IPS got loaded first. - * - * This awkward dance is so that neither module has to depend on the - * other in order for IPS to do the appropriate communication of - * GPU turbo limits to i915. - */ -static void -ips_ping_for_i915_load(void) -{ - void (*link)(void); - - link = symbol_get(ips_link_to_i915_driver); - if (link) { - link(); - symbol_put(ips_link_to_i915_driver); - } -} - -void intel_gpu_ips_init(struct drm_i915_private *dev_priv) -{ - /* We only register the i915 ips part with intel-ips once everything is - * set up, to avoid intel-ips sneaking in and reading bogus values. 
*/
-	rcu_assign_pointer(i915_mch_dev, dev_priv);
-
-	ips_ping_for_i915_load();
-}
-
-void intel_gpu_ips_teardown(void)
-{
-	rcu_assign_pointer(i915_mch_dev, NULL);
-}
-
-static void intel_init_emon(struct drm_i915_private *dev_priv)
-{
-	u32 lcfuse;
-	u8 pxw[16];
-	int i;
-
-	/* Disable to program */
-	I915_WRITE(ECR, 0);
-	POSTING_READ(ECR);
-
-	/* Program energy weights for various events */
-	I915_WRITE(SDEW, 0x15040d00);
-	I915_WRITE(CSIEW0, 0x007f0000);
-	I915_WRITE(CSIEW1, 0x1e220004);
-	I915_WRITE(CSIEW2, 0x04000004);
-
-	for (i = 0; i < 5; i++)
-		I915_WRITE(PEW(i), 0);
-	for (i = 0; i < 3; i++)
-		I915_WRITE(DEW(i), 0);
-
-	/* Program P-state weights to account for frequency power adjustment */
-	for (i = 0; i < 16; i++) {
-		u32 pxvidfreq = I915_READ(PXVFREQ(i));
-		unsigned long freq = intel_pxfreq(pxvidfreq);
-		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
-			PXVFREQ_PX_SHIFT;
-		unsigned long val;
-
-		val = vid * vid;
-		val *= (freq / 1000);
-		val *= 255;
-		val /= (127*127*900);
-		if (val > 0xff)
-			DRM_ERROR("bad pxval: %ld\n", val);
-		pxw[i] = val;
-	}
-	/* Render standby states get 0 weight */
-	pxw[14] = 0;
-	pxw[15] = 0;
-
-	for (i = 0; i < 4; i++) {
-		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
-			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
-		I915_WRITE(PXW(i), val);
-	}
-
-	/* Adjust magic regs to magic values (more experimental results) */
-	I915_WRITE(OGW0, 0);
-	I915_WRITE(OGW1, 0);
-	I915_WRITE(EG0, 0x00007f00);
-	I915_WRITE(EG1, 0x0000000e);
-	I915_WRITE(EG2, 0x000e0000);
-	I915_WRITE(EG3, 0x68000300);
-	I915_WRITE(EG4, 0x42000000);
-	I915_WRITE(EG5, 0x00140031);
-	I915_WRITE(EG6, 0);
-	I915_WRITE(EG7, 0);
-
-	for (i = 0; i < 8; i++)
-		I915_WRITE(PXWL(i), 0);
-
-	/* Enable PMON + select events */
-	I915_WRITE(ECR, 0x80000019);
-
-	lcfuse = I915_READ(LCFUSE02);
-
-	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev_priv))
-		mkwrite_device_info(dev_priv)->has_rps = false;
-
-	/* Initialize RPS limits (for userspace) */
-	if (IS_CHERRYVIEW(dev_priv))
-		cherryview_init_gt_powersave(dev_priv);
-	else if (IS_VALLEYVIEW(dev_priv))
-		valleyview_init_gt_powersave(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_init_rps_frequencies(dev_priv);
-
-	/* Derive initial user preferences/limits from the hardware limits */
-	rps->max_freq_softlimit = rps->max_freq;
-	rps->min_freq_softlimit = rps->min_freq;
-
-	/* After setting max-softlimit, find the overclock max freq */
-	if (IS_GEN(dev_priv, 6) ||
-	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
-		u32 params = 0;
-
-		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
-				       &params, NULL);
-		if (params & BIT(31)) { /* OC supported */
-			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
-					 (rps->max_freq & 0xff) * 50,
-					 (params & 0xff) * 50);
-			rps->max_freq = params & 0xff;
-		}
-	}
-
-	/* Finally allow us to boost to max by default */
-	rps->boost_freq = rps->max_freq;
-	rps->idle_freq = rps->min_freq;
-	rps->cur_freq = rps->idle_freq;
-}
-
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
-	intel_disable_gt_powersave(dev_priv);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		gen11_reset_rps_interrupts(dev_priv);
-	else if (INTEL_GEN(dev_priv) >= 6)
-		gen6_reset_rps_interrupts(dev_priv);
-}
-
-static void
intel_disable_rps(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->gt_pm.rps.lock); - - if (!dev_priv->gt_pm.rps.enabled) - return; - - if (INTEL_GEN(dev_priv) >= 9) - gen9_disable_rps(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - cherryview_disable_rps(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_disable_rps(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_disable_rps(dev_priv); - else if (IS_IRONLAKE_M(dev_priv)) - ironlake_disable_drps(dev_priv); - - dev_priv->gt_pm.rps.enabled = false; -} - -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) -{ - mutex_lock(&dev_priv->gt_pm.rps.lock); - - intel_disable_rps(dev_priv); - if (HAS_LLC(dev_priv)) - intel_llc_disable(&dev_priv->gt.llc); - - mutex_unlock(&dev_priv->gt_pm.rps.lock); -} - -static void intel_enable_rps(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - lockdep_assert_held(&rps->lock); - - if (rps->enabled) - return; - - if (IS_CHERRYVIEW(dev_priv)) { - cherryview_enable_rps(dev_priv); - } else if (IS_VALLEYVIEW(dev_priv)) { - valleyview_enable_rps(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 9) { - gen9_enable_rps(dev_priv); - } else if (IS_BROADWELL(dev_priv)) { - gen8_enable_rps(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 6) { - gen6_enable_rps(dev_priv); - } else if (IS_IRONLAKE_M(dev_priv)) { - ironlake_enable_drps(dev_priv); - intel_init_emon(dev_priv); - } - - WARN_ON(rps->max_freq < rps->min_freq); - WARN_ON(rps->idle_freq > rps->max_freq); - - WARN_ON(rps->efficient_freq < rps->min_freq); - WARN_ON(rps->efficient_freq > rps->max_freq); - - rps->enabled = true; -} - -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) -{ - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(dev_priv)) - return; - - mutex_lock(&dev_priv->gt_pm.rps.lock); - - if (HAS_RPS(dev_priv)) - intel_enable_rps(dev_priv); - - intel_llc_enable(&dev_priv->gt.llc); - - mutex_unlock(&dev_priv->gt_pm.rps.lock); -} - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -8079,7 +6395,6 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; if (dev_priv->vbt.fdi_rx_polarity_inverted) val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; I915_WRITE(TRANS_CHICKEN2(pipe), val); @@ -8942,90 +7257,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) } } -static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* - * N = val - 0xb7 - * Slow = Fast = GPLL ref * N - */ - return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); -} - -static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; -} - -static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* - * N = val / 2 - * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 - */ - return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); -} - -static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* CHV needs even values */ - return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 
2; -} - -int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) -{ - if (INTEL_GEN(dev_priv) >= 9) - return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, - GEN9_FREQ_SCALER); - else if (IS_CHERRYVIEW(dev_priv)) - return chv_gpu_freq(dev_priv, val); - else if (IS_VALLEYVIEW(dev_priv)) - return byt_gpu_freq(dev_priv, val); - else - return val * GT_FREQUENCY_MULTIPLIER; -} - -int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) -{ - if (INTEL_GEN(dev_priv) >= 9) - return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, - GT_FREQUENCY_MULTIPLIER); - else if (IS_CHERRYVIEW(dev_priv)) - return chv_freq_opcode(dev_priv, val); - else if (IS_VALLEYVIEW(dev_priv)) - return byt_freq_opcode(dev_priv, val); - else - return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); -} - void intel_pm_setup(struct drm_i915_private *dev_priv) { - mutex_init(&dev_priv->gt_pm.rps.lock); - mutex_init(&dev_priv->gt_pm.rps.power.mutex); - - atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0); - dev_priv->runtime_pm.suspended = false; atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); } - -u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat) -{ - u32 cagf; - - if (INTEL_GEN(dev_priv) >= 9) - cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; - else - cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; - - return cagf; -} diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 93d192d0610a..c06c6a846d9a 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -29,15 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc); void intel_init_pm(struct drm_i915_private *dev_priv); void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_i915_private *dev_priv); -void intel_gpu_ips_init(struct drm_i915_private *dev_priv); -void intel_gpu_ips_teardown(void); -void intel_init_gt_powersave(struct drm_i915_private *dev_priv); -void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); -void gen6_rps_busy(struct drm_i915_private *dev_priv); -void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct i915_request *rq); void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); @@ -63,25 +54,10 @@ void skl_write_plane_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state); void skl_write_cursor_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state); -bool ilk_disable_lp_wm(struct drm_device *dev); -int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, - struct intel_crtc_state *cstate); +bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv); void intel_init_ipc(struct drm_i915_private *dev_priv); void intel_enable_ipc(struct drm_i915_private *dev_priv); -int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); -int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); - -u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1); - -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); -unsigned long i915_mch_val(struct drm_i915_private *dev_priv); -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 
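
The intel_gpu_freq()/intel_freq_opcode() pair removed above converts between RPS opcode units and MHz: 50 MHz per unit before gen9, and 50/3 MHz on gen9+ (the "16.66 MHZ units" noted earlier in this patch) via GEN9_FREQ_SCALER. A small re-derivation of the gen9 case, assuming the mainline values GT_FREQUENCY_MULTIPLIER == 50 and GEN9_FREQ_SCALER == 3; this is an illustrative sketch, not the kernel helpers:

/* Round-to-nearest division, as DIV_ROUND_CLOSEST() does for positives. */
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

static int gen9_opcode_to_mhz(int val)
{
	/* e.g. opcode 66 -> (66 * 50 + 1) / 3 == 1100 MHz */
	return (val * GT_FREQUENCY_MULTIPLIER + GEN9_FREQ_SCALER / 2) /
		GEN9_FREQ_SCALER;
}

static int gen9_mhz_to_opcode(int mhz)
{
	/* inverse direction: 1100 MHz -> opcode 66 */
	return (mhz * GEN9_FREQ_SCALER + GT_FREQUENCY_MULTIPLIER / 2) /
		GT_FREQUENCY_MULTIPLIER;
}
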
-void i915_update_gfx_val(struct drm_i915_private *dev_priv); - -bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); -int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); -void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive); bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); #endif /* __INTEL_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c new file mode 100644 index 000000000000..eddb392917aa --- /dev/null +++ b/drivers/gpu/drm/i915/intel_region_lmem.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "gem/i915_gem_lmem.h" +#include "gem/i915_gem_region.h" +#include "intel_region_lmem.h" + +static int init_fake_lmem_bar(struct intel_memory_region *mem) +{ + struct drm_i915_private *i915 = mem->i915; + struct i915_ggtt *ggtt = &i915->ggtt; + unsigned long n; + int ret; + + /* We want to 1:1 map the mappable aperture to our reserved region */ + + mem->fake_mappable.start = 0; + mem->fake_mappable.size = resource_size(&mem->region); + mem->fake_mappable.color = I915_COLOR_UNEVICTABLE; + + ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable); + if (ret) + return ret; + + mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev, + mem->region.start, + mem->fake_mappable.size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_FORCE_CONTIGUOUS); + if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) { + drm_mm_remove_node(&mem->fake_mappable); + return -EINVAL; + } + + for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) { + ggtt->vm.insert_page(&ggtt->vm, + mem->remap_addr + (n << PAGE_SHIFT), + n << PAGE_SHIFT, + I915_CACHE_NONE, 0); + } + + mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr, + mem->fake_mappable.size); + + return 0; +} + +static void release_fake_lmem_bar(struct intel_memory_region *mem) +{ + if (!drm_mm_node_allocated(&mem->fake_mappable)) + return; + + drm_mm_remove_node(&mem->fake_mappable); + + dma_unmap_resource(&mem->i915->drm.pdev->dev, + mem->remap_addr, + mem->fake_mappable.size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_FORCE_CONTIGUOUS); +} + +static void +region_lmem_release(struct intel_memory_region *mem) +{ + release_fake_lmem_bar(mem); + io_mapping_fini(&mem->iomap); + intel_memory_region_release_buddy(mem); +} + +static int +region_lmem_init(struct intel_memory_region *mem) +{ + int ret; + + if (i915_modparams.fake_lmem_start) { + ret = init_fake_lmem_bar(mem); + GEM_BUG_ON(ret); + } + + if (!io_mapping_init_wc(&mem->iomap, + mem->io_start, + resource_size(&mem->region))) + return -EIO; + + ret = intel_memory_region_init_buddy(mem); + if (ret) + io_mapping_fini(&mem->iomap); + + return ret; +} + +const struct intel_memory_region_ops intel_region_lmem_ops = { + .init = region_lmem_init, + .release = region_lmem_release, + .create_object = __i915_gem_lmem_object_create, +}; + +struct intel_memory_region * +intel_setup_fake_lmem(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + struct intel_memory_region *mem; + resource_size_t mappable_end; + resource_size_t io_start; + resource_size_t start; + + GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt)); + GEM_BUG_ON(!i915_modparams.fake_lmem_start); + + /* Your mappable aperture belongs to me now! 
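We reuse BAR 2's length and IO start to describe the pretend region, placed at the fake_lmem_start module parameter.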
*/ + mappable_end = pci_resource_len(pdev, 2); + io_start = pci_resource_start(pdev, 2), + start = i915_modparams.fake_lmem_start; + + mem = intel_memory_region_create(i915, + start, + mappable_end, + PAGE_SIZE, + io_start, + &intel_region_lmem_ops); + if (!IS_ERR(mem)) { + DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region); + DRM_INFO("Intel graphics fake LMEM IO start: %llx\n", + (u64)mem->io_start); + DRM_INFO("Intel graphics fake LMEM size: %llx\n", + (u64)resource_size(&mem->region)); + } + + return mem; +} diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h new file mode 100644 index 000000000000..213def7c7b8a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_region_lmem.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_REGION_LMEM_H +#define __INTEL_REGION_LMEM_H + +struct drm_i915_private; + +extern const struct intel_memory_region_ops intel_region_lmem_ops; + +struct intel_memory_region * +intel_setup_fake_lmem(struct drm_i915_private *i915); + +#endif /* !__INTEL_REGION_LMEM_H */ diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 868cc78048d0..59aa1b6f1827 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -54,7 +54,8 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf) static void ____intel_wakeref_put_last(struct intel_wakeref *wf) { - if (!atomic_dec_and_test(&wf->count)) + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); + if (unlikely(!atomic_dec_and_test(&wf->count))) goto unlock; /* ops->put() must reschedule its own release on error/deferral */ @@ -67,13 +68,12 @@ unlock: mutex_unlock(&wf->mutex); } -void __intel_wakeref_put_last(struct intel_wakeref *wf) +void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) { INTEL_WAKEREF_BUG_ON(work_pending(&wf->work)); /* Assume we are not in process context and so cannot sleep. 
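If the caller requested an asynchronous put, or the mutex is contended, defer the final release to the worker.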
*/ - if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC || - !mutex_trylock(&wf->mutex)) { + if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) { schedule_work(&wf->work); return; } @@ -109,8 +109,17 @@ void __intel_wakeref_init(struct intel_wakeref *wf, int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) { - return wait_var_event_killable(&wf->wakeref, - !intel_wakeref_is_active(wf)); + int err; + + might_sleep(); + + err = wait_var_event_killable(&wf->wakeref, + !intel_wakeref_is_active(wf)); + if (err) + return err; + + intel_wakeref_unlock_wait(wf); + return 0; } static void wakeref_auto_timeout(struct timer_list *t) diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 5f0c972a80fb..8d945db94b7a 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -9,6 +9,7 @@ #include <linux/atomic.h> #include <linux/bits.h> +#include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/refcount.h> #include <linux/stackdepot.h> @@ -29,9 +30,6 @@ typedef depot_stack_handle_t intel_wakeref_t; struct intel_wakeref_ops { int (*get)(struct intel_wakeref *wf); int (*put)(struct intel_wakeref *wf); - - unsigned long flags; -#define INTEL_WAKEREF_PUT_ASYNC BIT(0) }; struct intel_wakeref { @@ -57,13 +55,11 @@ void __intel_wakeref_init(struct intel_wakeref *wf, } while (0) int __intel_wakeref_get_first(struct intel_wakeref *wf); -void __intel_wakeref_put_last(struct intel_wakeref *wf); +void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags); /** * intel_wakeref_get: Acquire the wakeref - * @i915: the drm_i915_private device * @wf: the wakeref - * @fn: callback for acquired the wakeref, called only on first acquire. * * Acquire a hold on the wakeref. The first user to do so, will acquire * the runtime pm wakeref and then call the @fn underneath the wakeref @@ -78,6 +74,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf); static inline int intel_wakeref_get(struct intel_wakeref *wf) { + might_sleep(); if (unlikely(!atomic_inc_not_zero(&wf->count))) return __intel_wakeref_get_first(wf); @@ -85,6 +82,22 @@ intel_wakeref_get(struct intel_wakeref *wf) } /** + * __intel_wakeref_get: Acquire the wakeref, again + * @wf: the wakeref + * + * Increment the wakeref counter, only valid if it is already held by + * the caller. + * + * See intel_wakeref_get(). + */ +static inline void +__intel_wakeref_get(struct intel_wakeref *wf) +{ + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); + atomic_inc(&wf->count); +} + +/** * intel_wakeref_get_if_in_use: Acquire the wakeref * @wf: the wakeref * @@ -100,10 +113,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) } /** - * intel_wakeref_put: Release the wakeref - * @i915: the drm_i915_private device + * intel_wakeref_put_flags: Release the wakeref * @wf: the wakeref - * @fn: callback for releasing the wakeref, called only on final release. + * @flags: control flags * * Release our hold on the wakeref. When there are no more users, * the runtime pm wakeref will be released after the @fn callback is called @@ -116,11 +128,25 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf) * code otherwise. 
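Callers that may not be allowed to sleep should use intel_wakeref_put_async() instead.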
*/ static inline void -intel_wakeref_put(struct intel_wakeref *wf) +__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags) +#define INTEL_WAKEREF_PUT_ASYNC BIT(0) { INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (unlikely(!atomic_add_unless(&wf->count, -1, 1))) - __intel_wakeref_put_last(wf); + __intel_wakeref_put_last(wf, flags); +} + +static inline void +intel_wakeref_put(struct intel_wakeref *wf) +{ + might_sleep(); + __intel_wakeref_put(wf, 0); +} + +static inline void +intel_wakeref_put_async(struct intel_wakeref *wf) +{ + __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC); } /** @@ -152,6 +178,21 @@ intel_wakeref_unlock(struct intel_wakeref *wf) } /** + * intel_wakeref_unlock_wait: Wait until the active callback is complete + * @wf: the wakeref + * + * Waits for the active callback (under the @wf->mutex or another CPU) is + * complete. + */ +static inline void +intel_wakeref_unlock_wait(struct intel_wakeref *wf) +{ + mutex_lock(&wf->mutex); + mutex_unlock(&wf->mutex); + flush_work(&wf->work); +} + +/** * intel_wakeref_is_active: Query whether the wakeref is currently held * @wf: the wakeref * @@ -170,6 +211,7 @@ intel_wakeref_is_active(const struct intel_wakeref *wf) static inline void __intel_wakeref_defer_park(struct intel_wakeref *wf) { + lockdep_assert_held(&wf->mutex); INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count)); atomic_set_release(&wf->count, 1); } diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c new file mode 100644 index 000000000000..a29d93707345 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
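The register tables below program the OA mux and boolean counters for the test metric set.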
+ */ + +#include <linux/sysfs.h> + +#include "i915_drv.h" +#include "i915_oa_tgl.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0xD920), 0x00000000 }, + { _MMIO(0xD900), 0x00000000 }, + { _MMIO(0xD904), 0xF0800000 }, + { _MMIO(0xD910), 0x00000000 }, + { _MMIO(0xD914), 0xF0800000 }, + { _MMIO(0xDC40), 0x00FF0000 }, + { _MMIO(0xD940), 0x00000004 }, + { _MMIO(0xD944), 0x0000FFFF }, + { _MMIO(0xDC00), 0x00000004 }, + { _MMIO(0xDC04), 0x0000FFFF }, + { _MMIO(0xD948), 0x00000003 }, + { _MMIO(0xD94C), 0x0000FFFF }, + { _MMIO(0xDC08), 0x00000003 }, + { _MMIO(0xDC0C), 0x0000FFFF }, + { _MMIO(0xD950), 0x00000007 }, + { _MMIO(0xD954), 0x0000FFFF }, + { _MMIO(0xDC10), 0x00000007 }, + { _MMIO(0xDC14), 0x0000FFFF }, + { _MMIO(0xD958), 0x00100002 }, + { _MMIO(0xD95C), 0x0000FFF7 }, + { _MMIO(0xDC18), 0x00100002 }, + { _MMIO(0xDC1C), 0x0000FFF7 }, + { _MMIO(0xD960), 0x00100002 }, + { _MMIO(0xD964), 0x0000FFCF }, + { _MMIO(0xDC20), 0x00100002 }, + { _MMIO(0xDC24), 0x0000FFCF }, + { _MMIO(0xD968), 0x00100082 }, + { _MMIO(0xD96C), 0x0000FFEF }, + { _MMIO(0xDC28), 0x00100082 }, + { _MMIO(0xDC2C), 0x0000FFEF }, + { _MMIO(0xD970), 0x001000C2 }, + { _MMIO(0xD974), 0x0000FFE7 }, + { _MMIO(0xDC30), 0x001000C2 }, + { _MMIO(0xDC34), 0x0000FFE7 }, + { _MMIO(0xD978), 0x00100001 }, + { _MMIO(0xD97C), 0x0000FFE7 }, + { _MMIO(0xDC38), 0x00100001 }, + { _MMIO(0xDC3C), 0x0000FFE7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x0D04), 0x00000200 }, + { _MMIO(0x9840), 0x00000000 }, + { _MMIO(0x9884), 0x00000000 }, + { _MMIO(0x9888), 0x280E0000 }, + { _MMIO(0x9888), 0x1E0E0147 }, + { _MMIO(0x9888), 0x180E0000 }, + { _MMIO(0x9888), 0x160E0000 }, + { _MMIO(0x9888), 0x1E0F1000 }, + { _MMIO(0x9888), 0x1E104000 }, + { _MMIO(0x9888), 0x2E020100 }, + { _MMIO(0x9888), 0x2C030004 }, + { _MMIO(0x9888), 0x38003000 }, + { _MMIO(0x9888), 0x1E0A8000 }, + { _MMIO(0x9884), 0x00000003 }, + { _MMIO(0x9888), 0x49110000 }, + { _MMIO(0x9888), 0x5D101400 }, + { _MMIO(0x9888), 0x1D140020 }, + { _MMIO(0x9888), 0x1D1103A3 }, + { _MMIO(0x9888), 0x01110000 }, + { _MMIO(0x9888), 0x61111000 }, + { _MMIO(0x9888), 0x1F128000 }, + { _MMIO(0x9888), 0x17100000 }, + { _MMIO(0x9888), 0x55100630 }, + { _MMIO(0x9888), 0x57100000 }, + { _MMIO(0x9888), 0x31100000 }, + { _MMIO(0x9884), 0x00000003 }, + { _MMIO(0x9888), 0x65100002 }, + { _MMIO(0x9884), 0x00000000 }, + { _MMIO(0x9888), 0x42000001 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.test_config.uuid, + "80a833f0-2504-4321-8894-e9277844ce7b", + sizeof(dev_priv->perf.test_config.uuid)); + dev_priv->perf.test_config.id = 1; + + dev_priv->perf.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b"; + dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; + + dev_priv->perf.test_config.attrs[0] = 
&dev_priv->perf.test_config.sysfs_metric_id.attr; + + dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h new file mode 100644 index 000000000000..4c25f0be825c --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_TGL_H__ +#define __I915_OA_TGL_H__ + +struct drm_i915_private; + +void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 268192b5613b..ef572a0c2566 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -79,7 +79,6 @@ __live_active_setup(struct drm_i915_private *i915) struct intel_engine_cs *engine; struct i915_sw_fence *submit; struct live_active *active; - enum intel_engine_id id; unsigned int count = 0; int err = 0; @@ -97,10 +96,10 @@ __live_active_setup(struct drm_i915_private *i915) if (err) goto out; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { struct i915_request *rq; - rq = i915_request_create(engine->kernel_context); + rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); break; @@ -156,7 +155,11 @@ static int live_active_wait(void *arg) i915_active_wait(&active->base); if (!READ_ONCE(active->retired)) { + struct drm_printer p = drm_err_printer(__func__); + pr_err("i915_active not retired after waiting!\n"); + i915_active_print(&active->base, &p); + err = -EINVAL; } @@ -185,7 +188,11 @@ static int live_active_retire(void *arg) err = -EIO; if (!READ_ONCE(active->retired)) { + struct drm_printer p = drm_err_printer(__func__); + pr_err("i915_active not retired after flushing!\n"); + i915_active_print(&active->base, &p); + err = -EINVAL; } @@ -206,3 +213,81 @@ int i915_active_live_selftests(struct drm_i915_private *i915) return i915_subtests(tests, i915); } + +static struct intel_engine_cs *node_to_barrier(struct active_node *it) +{ + struct intel_engine_cs *engine; + + if (!is_barrier(&it->base)) + return NULL; + + engine = __barrier_to_engine(it); + smp_rmb(); /* serialise with add_active_barriers */ + if (!is_barrier(&it->base)) + return NULL; + + return engine; +} + +void i915_active_print(struct i915_active *ref, struct drm_printer *m) +{ + drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire); + drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count)); + drm_printf(m, "\tpreallocated barriers? 
%s\n", + yesno(!llist_empty(&ref->preallocated_barriers))); + + if (i915_active_acquire_if_busy(ref)) { + struct active_node *it, *n; + + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + struct intel_engine_cs *engine; + + engine = node_to_barrier(it); + if (engine) { + drm_printf(m, "\tbarrier: %s\n", engine->name); + continue; + } + + if (i915_active_fence_isset(&it->base)) { + drm_printf(m, + "\ttimeline: %llx\n", it->timeline); + continue; + } + } + + i915_active_release(ref); + } +} + +static void spin_unlock_wait(spinlock_t *lock) +{ + spin_lock_irq(lock); + spin_unlock_irq(lock); +} + +void i915_active_unlock_wait(struct i915_active *ref) +{ + if (i915_active_acquire_if_busy(ref)) { + struct active_node *it, *n; + + rcu_read_lock(); + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + struct dma_fence *f; + + /* Wait for all active callbacks */ + f = rcu_dereference(it->base.fence); + if (f) + spin_unlock_wait(f->lock); + } + rcu_read_unlock(); + + i915_active_release(ref); + } + + /* And wait for the retire callback */ + spin_lock_irq(&ref->tree_lock); + spin_unlock_irq(&ref->tree_lock); + + /* ... which may have been on a thread instead */ + flush_work(&ref->work); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 97f89f744ee2..b37fc53973cc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -9,29 +9,33 @@ #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_pm.h" #include "i915_selftest.h" #include "igt_flush_test.h" #include "mock_drm.h" -static int switch_to_context(struct drm_i915_private *i915, - struct i915_gem_context *ctx) +static int switch_to_context(struct i915_gem_context *ctx) { - struct intel_engine_cs *engine; - enum intel_engine_id id; + struct i915_gem_engines_iter it; + struct intel_context *ce; + int err = 0; - for_each_engine(engine, i915, id) { + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct i915_request *rq; - rq = igt_request_alloc(ctx, engine); - if (IS_ERR(rq)) - return PTR_ERR(rq); + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } i915_request_add(rq); } + i915_gem_context_unlock_engines(ctx); - return 0; + return err; } static void trash_stolen(struct drm_i915_private *i915) @@ -42,6 +46,10 @@ static void trash_stolen(struct drm_i915_private *i915) unsigned long page; u32 prng = 0x12345678; + /* XXX: fsck. needs some more thought... 
*/ + if (!i915_ggtt_has_aperture(ggtt)) + return; + for (page = 0; page < size; page += PAGE_SIZE) { const dma_addr_t dma = i915->dsm.start + page; u32 __iomem *s; @@ -117,7 +125,6 @@ static void pm_resume(struct drm_i915_private *i915) */ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { intel_gt_sanitize(&i915->gt, false); - i915_gem_sanitize(i915); i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(&i915->ggtt); @@ -130,7 +137,7 @@ static int igt_gem_suspend(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx; - struct drm_file *file; + struct file *file; int err; file = mock_file(i915); @@ -140,7 +147,7 @@ static int igt_gem_suspend(void *arg) err = -ENOMEM; ctx = live_context(i915, file); if (!IS_ERR(ctx)) - err = switch_to_context(i915, ctx); + err = switch_to_context(ctx); if (err) goto out; @@ -155,9 +162,9 @@ static int igt_gem_suspend(void *arg) pm_resume(i915); - err = switch_to_context(i915, ctx); + err = switch_to_context(ctx); out: - mock_file_free(i915, file); + fput(file); return err; } @@ -165,7 +172,7 @@ static int igt_gem_hibernate(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx; - struct drm_file *file; + struct file *file; int err; file = mock_file(i915); @@ -175,7 +182,7 @@ static int igt_gem_hibernate(void *arg) err = -ENOMEM; ctx = live_context(i915, file); if (!IS_ERR(ctx)) - err = switch_to_context(i915, ctx); + err = switch_to_context(ctx); if (err) goto out; @@ -190,9 +197,9 @@ static int igt_gem_hibernate(void *arg) pm_resume(i915); - err = switch_to_context(i915, ctx); + err = switch_to_context(ctx); out: - mock_file_free(i915, file); + fput(file); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 0af9a58d011d..06ef88510209 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -43,8 +43,7 @@ static void quirk_add(struct drm_i915_gem_object *obj, list_add(&obj->st_link, objects); } -static int populate_ggtt(struct drm_i915_private *i915, - struct list_head *objects) +static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects) { unsigned long unbound, bound, count; struct drm_i915_gem_object *obj; @@ -53,7 +52,8 @@ static int populate_ggtt(struct drm_i915_private *i915, do { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -70,7 +70,7 @@ static int populate_ggtt(struct drm_i915_private *i915, count++; } while (1); pr_debug("Filled GGTT with %lu pages [%llu total]\n", - count, i915->ggtt.vm.total / PAGE_SIZE); + count, ggtt->vm.total / PAGE_SIZE); bound = 0; unbound = 0; @@ -96,7 +96,7 @@ static int populate_ggtt(struct drm_i915_private *i915, return -EINVAL; } - if (list_empty(&i915->ggtt.vm.bound_list)) { + if (list_empty(&ggtt->vm.bound_list)) { pr_err("No objects on the GGTT inactive list!\n"); return -EINVAL; } @@ -104,17 +104,16 @@ static int populate_ggtt(struct drm_i915_private *i915, return 0; } -static void unpin_ggtt(struct drm_i915_private *i915) +static void unpin_ggtt(struct i915_ggtt *ggtt) { struct i915_vma *vma; - list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link) + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) if (vma->obj->mm.quirked) i915_vma_unpin(vma); } -static void cleanup_objects(struct drm_i915_private *i915, - struct list_head *list) 
+static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list) { struct drm_i915_gem_object *obj, *on; @@ -124,19 +123,19 @@ static void cleanup_objects(struct drm_i915_private *i915, i915_gem_object_put(obj); } - i915_gem_drain_freed_objects(i915); + i915_gem_drain_freed_objects(ggtt->vm.i915); } static int igt_evict_something(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict one. */ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; @@ -153,7 +152,7 @@ static int igt_evict_something(void *arg) goto cleanup; } - unpin_ggtt(i915); + unpin_ggtt(ggtt); /* Everything is unpinned, we should be able to evict something */ mutex_lock(&ggtt->vm.mutex); @@ -169,13 +168,14 @@ static int igt_evict_something(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } static int igt_overcommit(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; struct drm_i915_gem_object *obj; struct i915_vma *vma; LIST_HEAD(objects); @@ -185,11 +185,11 @@ static int igt_overcommit(void *arg) * We expect it to fail. */ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto cleanup; @@ -198,21 +198,21 @@ static int igt_overcommit(void *arg) quirk_add(obj, &objects); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) { - pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma)); + if (vma != ERR_PTR(-ENOSPC)) { + pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma)); err = -EINVAL; goto cleanup; } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } static int igt_evict_for_vma(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; struct drm_mm_node target = { .start = 0, .size = 4096, @@ -222,7 +222,7 @@ static int igt_evict_for_vma(void *arg) /* Fill the GGTT with pinned objects and try to evict a range. 
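Eviction of the target range is expected to fail until the objects are unpinned.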
*/ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; @@ -236,7 +236,7 @@ static int igt_evict_for_vma(void *arg) goto cleanup; } - unpin_ggtt(i915); + unpin_ggtt(ggtt); /* Everything is unpinned, we should be able to evict the node */ mutex_lock(&ggtt->vm.mutex); @@ -249,7 +249,7 @@ static int igt_evict_for_vma(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } @@ -262,8 +262,8 @@ static void mock_color_adjust(const struct drm_mm_node *node, static int igt_evict_for_cache_color(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; const unsigned long flags = PIN_OFFSET_FIXED; struct drm_mm_node target = { .start = I915_GTT_PAGE_SIZE * 2, @@ -284,7 +284,7 @@ static int igt_evict_for_cache_color(void *arg) ggtt->vm.mm.color_adjust = mock_color_adjust; GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm)); - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto cleanup; @@ -300,7 +300,7 @@ static int igt_evict_for_cache_color(void *arg) goto cleanup; } - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto cleanup; @@ -345,22 +345,22 @@ static int igt_evict_for_cache_color(void *arg) err = 0; cleanup: - unpin_ggtt(i915); - cleanup_objects(i915, &objects); + unpin_ggtt(ggtt); + cleanup_objects(ggtt, &objects); ggtt->vm.mm.color_adjust = NULL; return err; } static int igt_evict_vm(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict everything. 
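i915_gem_evict_vm() should be able to reclaim the whole address space once the objects are unpinned.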
*/ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; @@ -374,7 +374,7 @@ static int igt_evict_vm(void *arg) goto cleanup; } - unpin_ggtt(i915); + unpin_ggtt(ggtt); mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_vm(&ggtt->vm); @@ -386,14 +386,16 @@ static int igt_evict_vm(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } static int igt_evict_contexts(void *arg) { const u64 PRETEND_GGTT_SIZE = 16ull << 20; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; + struct drm_i915_private *i915 = gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; struct reserved { @@ -423,10 +425,10 @@ static int igt_evict_contexts(void *arg) /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); - mutex_lock(&i915->ggtt.vm.mutex); - err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole, + mutex_lock(&ggtt->vm.mutex); + err = i915_gem_gtt_insert(&ggtt->vm, &hole, PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, PIN_NOEVICT); if (err) goto out_locked; @@ -436,17 +438,17 @@ static int igt_evict_contexts(void *arg) do { struct reserved *r; - mutex_unlock(&i915->ggtt.vm.mutex); + mutex_unlock(&ggtt->vm.mutex); r = kcalloc(1, sizeof(*r), GFP_KERNEL); - mutex_lock(&i915->ggtt.vm.mutex); + mutex_lock(&ggtt->vm.mutex); if (!r) { err = -ENOMEM; goto out_locked; } - if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node, + if (i915_gem_gtt_insert(&ggtt->vm, &r->node, 1ul << 20, 0, I915_COLOR_UNEVICTABLE, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, PIN_NOEVICT)) { kfree(r); break; @@ -458,13 +460,13 @@ static int igt_evict_contexts(void *arg) count++; } while (1); drm_mm_remove_node(&hole); - mutex_unlock(&i915->ggtt.vm.mutex); + mutex_unlock(&ggtt->vm.mutex); pr_info("Filled GGTT with %lu 1MiB nodes\n", count); /* Overfill the GGTT with context objects and so try to evict one. 
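Pinning each new context's state into the nearly-full GGTT should force eviction to make room.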
*/ - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_sw_fence fence; - struct drm_file *file; + struct file *file; file = mock_file(i915); if (IS_ERR(file)) { @@ -513,12 +515,12 @@ static int igt_evict_contexts(void *arg) pr_info("Submitted %lu contexts/requests on %s\n", count, engine->name); - mock_file_free(i915, file); + fput(file); if (err) break; } - mutex_lock(&i915->ggtt.vm.mutex); + mutex_lock(&ggtt->vm.mutex); out_locked: if (igt_flush_test(i915)) err = -EIO; @@ -532,7 +534,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - mutex_unlock(&i915->ggtt.vm.mutex); + mutex_unlock(&ggtt->vm.mutex); intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; @@ -556,7 +558,7 @@ int i915_gem_evict_mock_selftests(void) return -ENOMEM; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = i915_subtests(tests, i915); + err = i915_subtests(tests, &i915->gt); drm_dev_put(&i915->drm); return err; @@ -571,5 +573,5 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, i915); + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index ebe735df6504..80cde5bda922 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -104,6 +104,7 @@ static const struct drm_i915_gem_object_ops fake_ops = { static struct drm_i915_gem_object * fake_dma_object(struct drm_i915_private *i915, u64 size) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); @@ -117,7 +118,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) goto err; drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &fake_ops); + i915_gem_object_init(obj, &fake_ops, &lock_class); i915_gem_object_set_volatile(obj); @@ -211,10 +212,12 @@ static int lowlevel_hole(struct drm_i915_private *i915, unsigned long end_time) { I915_RND_STATE(seed_prng); + struct i915_vma *mock_vma; unsigned int size; - struct i915_vma mock_vma; - memset(&mock_vma, 0, sizeof(struct i915_vma)); + mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL); + if (!mock_vma) + return -ENOMEM; /* Keep creating larger objects until one cannot fit into the hole */ for (size = 12; (hole_end - hole_start) >> size; size++) { @@ -238,8 +241,10 @@ static int lowlevel_hole(struct drm_i915_private *i915, if (order) break; } while (count >>= 1); - if (!count) + if (!count) { + kfree(mock_vma); return -ENOMEM; + } GEM_BUG_ON(!order); GEM_BUG_ON(count * BIT_ULL(size) > vm->total); @@ -282,12 +287,12 @@ static int lowlevel_hole(struct drm_i915_private *i915, vm->allocate_va_range(vm, addr, BIT_ULL(size))) break; - mock_vma.pages = obj->mm.pages; - mock_vma.node.size = BIT_ULL(size); - mock_vma.node.start = addr; + mock_vma->pages = obj->mm.pages; + mock_vma->node.size = BIT_ULL(size); + mock_vma->node.start = addr; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - vm->insert_entries(vm, &mock_vma, + vm->insert_entries(vm, mock_vma, I915_CACHE_NONE, 0); } count = n; @@ -310,6 +315,7 @@ static int lowlevel_hole(struct drm_i915_private *i915, cleanup_freed_objects(i915); } + kfree(mock_vma); return 0; } @@ -1000,9 +1006,9 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, u64 hole_start, u64 hole_end, unsigned long end_time)) { - struct drm_file *file; struct i915_ppgtt *ppgtt; 
IGT_TIMEOUT(end_time); + struct file *file; int err; if (!HAS_FULL_PPGTT(dev_priv)) @@ -1025,7 +1031,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, i915_vm_put(&ppgtt->vm); out_free: - mock_file_free(dev_priv, file); + fput(file); return err; } @@ -1148,6 +1154,9 @@ static int igt_ggtt_page(void *arg) unsigned int *order, n; int err; + if (!i915_ggtt_has_aperture(ggtt)) + return 0; + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -1157,11 +1166,13 @@ static int igt_ggtt_page(void *arg) goto out_free; memset(&tmp, 0, sizeof(tmp)); + mutex_lock(&ggtt->vm.mutex); err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp, count * PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); + mutex_unlock(&ggtt->vm.mutex); if (err) goto out_unpin; @@ -1213,7 +1224,9 @@ static int igt_ggtt_page(void *arg) out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_lock(&ggtt->vm.mutex); drm_mm_remove_node(&tmp); + mutex_unlock(&ggtt->vm.mutex); out_unpin: i915_gem_object_unpin_pages(obj); out_free: @@ -1778,9 +1791,9 @@ static int igt_cs_tlb(void *arg) struct i915_address_space *vm; struct i915_gem_context *ctx; struct intel_context *ce; - struct drm_file *file; struct i915_vma *vma; I915_RND_STATE(prng); + struct file *file; unsigned int i; u32 *result; u32 *batch; @@ -2018,7 +2031,7 @@ out_put_bbe: out_vm: i915_vm_put(vm); out_unlock: - mock_file_free(i915, file); + fput(file); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 6daf6599ec79..476fba2ed8bb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -16,7 +16,9 @@ selftest(gt_engines, intel_engine_live_selftests) selftest(gt_timelines, intel_timeline_live_selftests) selftest(gt_contexts, intel_context_live_selftests) selftest(gt_lrc, intel_lrc_live_selftests) +selftest(gt_mocs, intel_mocs_live_selftests) selftest(gt_pm, intel_gt_pm_live_selftests) +selftest(gt_heartbeat, intel_heartbeat_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) @@ -32,7 +34,9 @@ selftest(gem_contexts, i915_gem_context_live_selftests) selftest(blt, i915_gem_object_blt_live_selftests) selftest(client, i915_gem_client_blt_live_selftests) selftest(reset, intel_reset_live_selftests) +selftest(memory_region, intel_memory_region_live_selftests) selftest(hangcheck, intel_hangcheck_live_selftests) selftest(execlists, intel_execlists_live_selftests) -selftest(guc, intel_guc_live_selftest) selftest(perf, i915_perf_live_selftests) +/* Here be dragons: keep last to run last! */ +selftest(late_gt_pm, intel_gt_pm_late_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index dc6d689e4251..d1a1568c47ba 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -23,7 +23,8 @@ test_stream(struct i915_perf *perf) I915_ENGINE_CLASS_RENDER, 0), .sample_flags = SAMPLE_OA_REPORT, - .oa_format = I915_OA_FORMAT_C4_B8, + .oa_format = IS_GEN(perf->i915, 12) ? 
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8, .metrics_set = 1, }; struct i915_perf_stream *stream; @@ -131,7 +132,7 @@ static int live_noa_delay(void *arg) for (i = 0; i < 4; i++) intel_write_status_page(stream->engine, 0x100 + i, 0); - rq = i915_request_create(stream->engine->kernel_context); + rq = intel_engine_create_kernel_request(stream->engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out; diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h new file mode 100644 index 000000000000..f7129a243daa --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* List each unit test as selftest(name, function) + * + * The name is used as both an enum and expanded as subtest__name to create + * a module parameter. It must be unique and legal for a C identifier. + * + * The function should be of type int function(void). It may be conditionally + * compiled using #if IS_ENABLED(DRM_I915_SELFTEST). + * + * Tests are executed in order by igt/i915_selftest + */ +selftest(engine_cs, intel_engine_cs_perf_selftests) +selftest(blt, i915_gem_object_blt_perf_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 30ae34f62176..f89d9c42f1fa 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -27,11 +27,13 @@ #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "i915_random.h" #include "i915_selftest.h" #include "igt_live_test.h" +#include "igt_spinner.h" #include "lib_sw_fence.h" #include "mock_drm.h" @@ -464,6 +466,7 @@ static int mock_breadcrumbs_smoketest(void *arg) get_task_struct(threads[n]); } + yield(); /* start all threads before we begin */ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); for (n = 0; n < ncpus; n++) { @@ -539,6 +542,7 @@ static int live_nop_request(void *arg) if (err) return err; + intel_engine_pm_get(engine); for_each_prime_number_from(prime, 1, 8192) { struct i915_request *request = NULL; @@ -577,6 +581,7 @@ static int live_nop_request(void *arg) if (__igt_timeout(end_time, NULL)) break; } + intel_engine_pm_put(engine); err = igt_live_test_end(&t); if (err) @@ -691,10 +696,13 @@ static int live_empty_request(void *arg) if (err) goto out_batch; + intel_engine_pm_get(engine); + /* Warmup / preload */ request = empty_request(engine, batch); if (IS_ERR(request)) { err = PTR_ERR(request); + intel_engine_pm_put(engine); goto out_batch; } i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT); @@ -707,6 +715,7 @@ static int live_empty_request(void *arg) request = empty_request(engine, batch); if (IS_ERR(request)) { err = PTR_ERR(request); + intel_engine_pm_put(engine); goto out_batch; } } @@ -720,6 +729,7 @@ static int live_empty_request(void *arg) break; } i915_request_put(request); + intel_engine_pm_put(engine); err = igt_live_test_end(&t); if (err) @@ -739,10 +749,8 @@ out_batch: static struct i915_vma *recursive_batch(struct drm_i915_private *i915) { - struct i915_gem_context *ctx = i915->kernel_context; struct drm_i915_gem_object *obj; const int gen = INTEL_GEN(i915); - struct i915_address_space *vm; struct i915_vma *vma; u32 *cmd; int err; @@ -751,9 +759,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (IS_ERR(obj)) return ERR_CAST(obj); - vm = i915_gem_context_get_vm_rcu(ctx); - 
vma = i915_vma_instance(obj, vm, NULL); - i915_vm_put(vm); + vma = i915_vma_instance(obj, i915->gt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -844,7 +850,7 @@ static int live_all_engines(void *arg) idx = 0; for_each_uabi_engine(engine, i915) { - request[idx] = i915_request_create(engine->kernel_context); + request[idx] = intel_engine_create_kernel_request(engine); if (IS_ERR(request[idx])) { err = PTR_ERR(request[idx]); pr_err("%s: Request allocation failed with err=%d\n", @@ -961,7 +967,7 @@ static int live_sequential_engines(void *arg) goto out_free; } - request[idx] = i915_request_create(engine->kernel_context); + request[idx] = intel_engine_create_kernel_request(engine); if (IS_ERR(request[idx])) { err = PTR_ERR(request[idx]); pr_err("%s: Request allocation failed for %s with err=%d\n", @@ -1066,15 +1072,18 @@ static int __live_parallel_engine1(void *arg) struct intel_engine_cs *engine = arg; IGT_TIMEOUT(end_time); unsigned long count; + int err = 0; count = 0; + intel_engine_pm_get(engine); do { struct i915_request *rq; - int err; rq = i915_request_create(engine->kernel_context); - if (IS_ERR(rq)) - return PTR_ERR(rq); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } i915_request_get(rq); i915_request_add(rq); @@ -1084,13 +1093,14 @@ static int __live_parallel_engine1(void *arg) err = -ETIME; i915_request_put(rq); if (err) - return err; + break; count++; } while (!__igt_timeout(end_time, NULL)); + intel_engine_pm_put(engine); pr_info("%s: %lu request + sync\n", engine->name, count); - return 0; + return err; } static int __live_parallel_engineN(void *arg) @@ -1098,21 +1108,100 @@ static int __live_parallel_engineN(void *arg) struct intel_engine_cs *engine = arg; IGT_TIMEOUT(end_time); unsigned long count; + int err = 0; count = 0; + intel_engine_pm_get(engine); do { struct i915_request *rq; rq = i915_request_create(engine->kernel_context); - if (IS_ERR(rq)) - return PTR_ERR(rq); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } i915_request_add(rq); count++; } while (!__igt_timeout(end_time, NULL)); + intel_engine_pm_put(engine); pr_info("%s: %lu requests\n", engine->name, count); - return 0; + return err; +} + +static bool wake_all(struct drm_i915_private *i915) +{ + if (atomic_dec_and_test(&i915->selftest.counter)) { + wake_up_var(&i915->selftest.counter); + return true; + } + + return false; +} + +static int wait_for_all(struct drm_i915_private *i915) +{ + if (wake_all(i915)) + return 0; + + if (wait_var_event_timeout(&i915->selftest.counter, + !atomic_read(&i915->selftest.counter), + i915_selftest.timeout_jiffies)) + return 0; + + return -ETIME; +} + +static int __live_parallel_spin(void *arg) +{ + struct intel_engine_cs *engine = arg; + struct igt_spinner spin; + struct i915_request *rq; + int err = 0; + + /* + * Create a spinner running for eternity on each engine. If a second + * spinner is incorrectly placed on the same engine, it will not be + * able to start in time. 
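Each engine should therefore end up hosting exactly one of the parallel spinners.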
+ */ + + if (igt_spinner_init(&spin, engine->gt)) { + wake_all(engine->i915); + return -ENOMEM; + } + + intel_engine_pm_get(engine); + rq = igt_spinner_create_request(&spin, + engine->kernel_context, + MI_NOOP); /* no preemption */ + intel_engine_pm_put(engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + if (err == -ENODEV) + err = 0; + wake_all(engine->i915); + goto out_spin; + } + + i915_request_get(rq); + i915_request_add(rq); + if (igt_wait_for_spinner(&spin, rq)) { + /* Occupy this engine for the whole test */ + err = wait_for_all(engine->i915); + } else { + pr_err("Failed to start spinner on %s\n", engine->name); + err = -EINVAL; + } + igt_spinner_end(&spin); + + if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) + err = -EIO; + i915_request_put(rq); + +out_spin: + igt_spinner_fini(&spin); + return err; } static int live_parallel_engines(void *arg) @@ -1121,6 +1210,7 @@ static int live_parallel_engines(void *arg) static int (* const func[])(void *arg) = { __live_parallel_engine1, __live_parallel_engineN, + __live_parallel_spin, NULL, }; const unsigned int nengines = num_uabi_engines(i915); @@ -1139,13 +1229,17 @@ static int live_parallel_engines(void *arg) return -ENOMEM; for (fn = func; !err && *fn; fn++) { + char name[KSYM_NAME_LEN]; struct igt_live_test t; unsigned int idx; - err = igt_live_test_begin(&t, i915, __func__, ""); + snprintf(name, sizeof(name), "%pS", fn); + err = igt_live_test_begin(&t, i915, __func__, name); if (err) break; + atomic_set(&i915->selftest.counter, nengines); + idx = 0; for_each_uabi_engine(engine, i915) { tsk[idx] = kthread_run(*fn, engine, @@ -1158,6 +1252,8 @@ static int live_parallel_engines(void *arg) get_task_struct(tsk[idx++]); } + yield(); /* start all threads before we kthread_stop() */ + idx = 0; for_each_uabi_engine(engine, i915) { int status; @@ -1227,9 +1323,9 @@ static int live_breadcrumbs_smoketest(void *arg) struct task_struct **threads; struct igt_live_test live; intel_wakeref_t wakeref; - struct drm_file *file; struct smoketest *smoke; unsigned int n, idx; + struct file *file; int ret = 0; /* @@ -1314,6 +1410,7 @@ static int live_breadcrumbs_smoketest(void *arg) idx++; } + yield(); /* start all threads before we begin */ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); out_flush: @@ -1350,7 +1447,7 @@ out_threads: out_smoke: kfree(smoke); out_file: - mock_file_free(i915, file); + fput(file); out_rpm: intel_runtime_pm_put(&i915->runtime_pm, wakeref); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 825a8286cbe8..d3bf9eefb682 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -23,13 +23,14 @@ #include <linux/random.h> -#include "../i915_drv.h" -#include "../i915_selftest.h" +#include "gt/intel_gt_pm.h" +#include "i915_drv.h" +#include "i915_selftest.h" #include "igt_flush_test.h" struct i915_selftest i915_selftest __read_mostly = { - .timeout_ms = 1000, + .timeout_ms = 500, }; int i915_mock_sanitycheck(void) @@ -56,6 +57,12 @@ enum { #undef selftest }; +enum { +#define selftest(name, func) perf_##name, +#include "i915_perf_selftests.h" +#undef selftest +}; + struct selftest { bool enabled; const char *name; @@ -77,6 +84,12 @@ static struct selftest live_selftests[] = { }; #undef selftest +#define selftest(n, f) [perf_##n] = { .name = #n, { .live = f } }, +static struct selftest perf_selftests[] = { +#include "i915_perf_selftests.h" +}; +#undef selftest + /* Embed the line number into the parameter 
name so that we can order tests */ #define selftest(n, func) selftest_0(n, func, param(n)) #define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n)) @@ -92,6 +105,13 @@ module_param_named(id, live_selftests[live_##n].enabled, bool, 0400); #include "i915_live_selftests.h" #undef selftest_0 #undef param + +#define param(n) __PASTE(igt__, __PASTE(__LINE__, __perf_##n)) +#define selftest_0(n, func, id) \ +module_param_named(id, perf_selftests[perf_##n].enabled, bool, 0400); +#include "i915_perf_selftests.h" +#undef selftest_0 +#undef param #undef selftest static void set_default_test_all(struct selftest *st, unsigned int count) @@ -199,6 +219,27 @@ int i915_live_selftests(struct pci_dev *pdev) return 0; } +int i915_perf_selftests(struct pci_dev *pdev) +{ + int err; + + if (!i915_selftest.perf) + return 0; + + err = run_selftests(perf, pdev_to_i915(pdev)); + if (err) { + i915_selftest.perf = err; + return err; + } + + if (i915_selftest.perf < 0) { + i915_selftest.perf = -ENOTTY; + return 1; + } + + return 0; +} + static bool apply_subtest_filter(const char *caller, const char *name) { char *filter, *sep, *tok; @@ -256,6 +297,10 @@ int __i915_live_setup(void *data) { struct drm_i915_private *i915 = data; + /* The selftests expect an idle system */ + if (intel_gt_pm_wait_for_idle(&i915->gt)) + return -EIO; + return intel_gt_terminally_wedged(&i915->gt); } @@ -275,6 +320,10 @@ int __intel_gt_live_setup(void *data) { struct intel_gt *gt = data; + /* The selftests expect an idle system */ + if (intel_gt_pm_wait_for_idle(gt)) + return -EIO; + return intel_gt_terminally_wedged(gt); } @@ -356,3 +405,6 @@ MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardw module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400); MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)"); + +module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400); +MODULE_PARM_DESC(perf_selftests, "Run performance orientated selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)"); diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c index 810b60100c2c..c130010a7033 100644 --- a/drivers/gpu/drm/i915/selftests/igt_live_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c @@ -16,6 +16,7 @@ int igt_live_test_begin(struct igt_live_test *t, const char *func, const char *name) { + struct intel_gt *gt = &i915->gt; struct intel_engine_cs *engine; enum intel_engine_id id; int err; @@ -24,7 +25,7 @@ int igt_live_test_begin(struct igt_live_test *t, t->func = func; t->name = name; - err = intel_gt_wait_for_idle(&i915->gt, MAX_SCHEDULE_TIMEOUT); + err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); if (err) { pr_err("%s(%s): failed to idle before, with err=%d!", func, name, err); @@ -33,7 +34,7 @@ int igt_live_test_begin(struct igt_live_test *t, t->reset_global = i915_reset_count(&i915->gpu_error); - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) t->reset_engine[id] = i915_reset_engine_count(&i915->gpu_error, engine); @@ -56,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t) return -EIO; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, &i915->gt, id) { if (t->reset_engine[id] == i915_reset_engine_count(&i915->gpu_error, engine)) continue; diff --git 
a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c new file mode 100644 index 000000000000..583a4ff8b8c9 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <drm/drm_file.h> + +#include "i915_drv.h" +#include "igt_mmap.h" + +unsigned long igt_mmap_node(struct drm_i915_private *i915, + struct drm_vma_offset_node *node, + unsigned long addr, + unsigned long prot, + unsigned long flags) +{ + struct file *file; + int err; + + /* Pretend to open("/dev/dri/card0") */ + file = mock_drm_getfile(i915->drm.primary, O_RDWR); + if (IS_ERR(file)) + return PTR_ERR(file); + + err = drm_vma_node_allow(node, file->private_data); + if (err) { + addr = err; + goto out_file; + } + + addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT, + prot, flags, drm_vma_node_offset_addr(node)); + + drm_vma_node_revoke(node, file->private_data); +out_file: + fput(file); + return addr; +} diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h new file mode 100644 index 000000000000..6e716cb59d7e --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef IGT_MMAP_H +#define IGT_MMAP_H + +struct drm_i915_private; +struct drm_vma_offset_node; + +unsigned long igt_mmap_node(struct drm_i915_private *i915, + struct drm_vma_offset_node *node, + unsigned long addr, + unsigned long prot, + unsigned long flags); + +#endif /* IGT_MMAP_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index ee8450b871da..e8a58fe49c39 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) void *vaddr; int err; - GEM_BUG_ON(INTEL_GEN(gt->i915) < 8); - memset(spin, 0, sizeof(*spin)); spin->gt = gt; @@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin, struct intel_engine_cs *engine = ce->engine; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; + unsigned int flags; u32 *batch; int err; GEM_BUG_ON(spin->gt != ce->vm->gt); + if (!intel_engine_can_store_dword(ce->engine)) + return ERR_PTR(-ENODEV); + vma = i915_vma_instance(spin->obj, ce->vm, NULL); if (IS_ERR(vma)) return ERR_CAST(vma); @@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin, batch = spin->batch; - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = upper_32_bits(hws_address(hws, rq)); + if (INTEL_GEN(rq->i915) >= 8) { + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = lower_32_bits(hws_address(hws, rq)); + *batch++ = upper_32_bits(hws_address(hws, rq)); + } else if (INTEL_GEN(rq->i915) >= 6) { + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = 0; + *batch++ = hws_address(hws, rq); + } else if (INTEL_GEN(rq->i915) >= 4) { + *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *batch++ = 0; + *batch++ = hws_address(hws, rq); + } else { + *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *batch++ = hws_address(hws, rq); + } *batch++ = rq->fence.seqno; *batch++ = arbitration_command; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; + if (INTEL_GEN(rq->i915) >= 8) + *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1; + else if (IS_HASWELL(rq->i915)) + *batch++ = MI_BATCH_BUFFER_START 
| MI_BATCH_PPGTT_HSW; + else if (INTEL_GEN(rq->i915) >= 6) + *batch++ = MI_BATCH_BUFFER_START; + else + *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; *batch++ = lower_32_bits(vma->node.start); *batch++ = upper_32_bits(vma->node.start); + *batch++ = MI_BATCH_BUFFER_END; /* not reached */ intel_gt_chipset_flush(engine->gt); @@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin, goto cancel_rq; } - err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); + flags = 0; + if (INTEL_GEN(rq->i915) <= 5) + flags |= I915_DISPATCH_SECURE; + err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); cancel_rq: if (err) { diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 56091e7e599e..04d0aa7b349e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -11,8 +11,15 @@ #include "mock_gem_device.h" #include "mock_region.h" +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" +#include "gem/i915_gem_object_blt.h" +#include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_engine_user.h" +#include "gt/intel_gt.h" +#include "selftests/igt_flush_test.h" #include "selftests/i915_random.h" static void close_objects(struct intel_memory_region *mem, @@ -25,7 +32,7 @@ static void close_objects(struct intel_memory_region *mem, if (i915_gem_object_has_pinned_pages(obj)) i915_gem_object_unpin_pages(obj); /* No polluting the memory region between tests */ - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); list_del(&obj->st_link); i915_gem_object_put(obj); } @@ -115,7 +122,7 @@ put: static void igt_object_release(struct drm_i915_gem_object *obj) { i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + __i915_gem_object_put_pages(obj); list_del(&obj->st_link); i915_gem_object_put(obj); } @@ -252,6 +259,324 @@ err_close_objects: return err; } +static int igt_gpu_write_dw(struct intel_context *ce, + struct i915_vma *vma, + u32 dword, + u32 value) +{ + return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32), + vma->size >> PAGE_SHIFT, value); +} + +static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + unsigned long n; + int err; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_wc_domain(obj, false); + i915_gem_object_unlock(obj); + if (err) + return err; + + err = i915_gem_object_pin_pages(obj); + if (err) + return err; + + for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { + u32 __iomem *base; + u32 read_val; + + base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + + read_val = ioread32(base + dword); + io_mapping_unmap_atomic(base); + if (read_val != val) { + pr_err("n=%lu base[%u]=%u, val=%u\n", + n, dword, read_val, val); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_pages(obj); + return err; +} + +static int igt_gpu_write(struct i915_gem_context *ctx, + struct drm_i915_gem_object *obj) +{ + struct i915_gem_engines *engines; + struct i915_gem_engines_iter it; + struct i915_address_space *vm; + struct intel_context *ce; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + unsigned int count; + struct i915_vma *vma; + int *order; + int i, n; + int err = 0; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + n = 0; + count = 0; + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + count++; + if 
(!intel_engine_can_store_dword(ce->engine)) + continue; + + vm = ce->vm; + n++; + } + i915_gem_context_unlock_engines(ctx); + if (!n) + return 0; + + order = i915_random_order(count * count, &prng); + if (!order) + return -ENOMEM; + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_free; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_free; + + i = 0; + engines = i915_gem_context_lock_engines(ctx); + do { + u32 rng = prandom_u32_state(&prng); + u32 dword = offset_in_page(rng) / 4; + + ce = engines->engines[order[i] % engines->num_engines]; + i = (i + 1) % (count * count); + if (!ce || !intel_engine_can_store_dword(ce->engine)) + continue; + + err = igt_gpu_write_dw(ce, vma, dword, rng); + if (err) + break; + + err = igt_cpu_check(obj, dword, rng); + if (err) + break; + } while (!__igt_timeout(end_time, NULL)); + i915_gem_context_unlock_engines(ctx); + +out_free: + kfree(order); + + if (err == -ENOMEM) + err = 0; + + return err; +} + +static int igt_lmem_create(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + int err = 0; + + obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + +static int igt_lmem_write_gpu(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct file *file; + I915_RND_STATE(prng); + u32 sz; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE); + + obj = i915_gem_object_create_lmem(i915, sz, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_file; + } + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + err = igt_gpu_write(ctx, obj); + if (err) + pr_err("igt_gpu_write failed(%d)\n", err); + + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); +out_file: + fput(file); + return err; +} + +static struct intel_engine_cs * +random_engine_class(struct drm_i915_private *i915, + unsigned int class, + struct rnd_state *prng) +{ + struct intel_engine_cs *engine; + unsigned int count; + + count = 0; + for (engine = intel_engine_lookup_user(i915, class, 0); + engine && engine->uabi_class == class; + engine = rb_entry_safe(rb_next(&engine->uabi_node), + typeof(*engine), uabi_node)) + count++; + + count = i915_prandom_u32_max_state(count, prng); + return intel_engine_lookup_user(i915, class, count); +} + +static int igt_lmem_write_cpu(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + u32 bytes[] = { + 0, /* rng placeholder */ + sizeof(u32), + sizeof(u64), + 64, /* cl */ + PAGE_SIZE, + PAGE_SIZE - sizeof(u32), + PAGE_SIZE - sizeof(u64), + PAGE_SIZE - 64, + }; + struct intel_engine_cs *engine; + u32 *vaddr; + u32 sz; + u32 i; + int *order; + int count; + int err; + + engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng); + if (!engine) + return 0; + + pr_info("%s: using %s\n", __func__, engine->name); + + sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE); + sz = max_t(u32, 2 * PAGE_SIZE, sz); + + obj = i915_gem_object_create_lmem(i915, sz, 
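igt_gpu_write() walks a pre-shuffled permutation so that engine/dword combinations are not exercised in a fixed order; the idiom in isolation, with a hypothetical per-step callback:

static int walk_random_order(unsigned int count,
			     int (*step)(int idx))
{
	I915_RND_STATE(prng);
	int *order;
	unsigned int i;
	int err = 0;

	order = i915_random_order(count, &prng);
	if (!order)
		return -ENOMEM;

	for (i = 0; i < count && !err; i++)
		err = step(order[i]);

	kfree(order);
	return err;
}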
I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out_put; + } + + /* Put the pages into a known state -- from the gpu for added fun */ + intel_engine_pm_get(engine); + err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf); + intel_engine_pm_put(engine); + if (err) + goto out_unpin; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_wc_domain(obj, true); + i915_gem_object_unlock(obj); + if (err) + goto out_unpin; + + count = ARRAY_SIZE(bytes); + order = i915_random_order(count * count, &prng); + if (!order) { + err = -ENOMEM; + goto out_unpin; + } + + /* We want to throw in a random width/align */ + bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32), + sizeof(u32)); + + i = 0; + do { + u32 offset; + u32 align; + u32 dword; + u32 size; + u32 val; + + size = bytes[order[i] % count]; + i = (i + 1) % (count * count); + + align = bytes[order[i] % count]; + i = (i + 1) % (count * count); + + align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align)); + + offset = igt_random_offset(&prng, 0, obj->base.size, + size, align); + + val = prandom_u32_state(&prng); + memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf, + size / sizeof(u32)); + + /* + * Sample random dw -- don't waste precious time reading every + * single dw. + */ + dword = igt_random_offset(&prng, offset, + offset + size, + sizeof(u32), sizeof(u32)); + dword /= sizeof(u32); + if (vaddr[dword] != (val ^ 0xdeadbeaf)) { + pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n", + __func__, dword, vaddr[dword], val ^ 0xdeadbeaf, + size, align, offset); + err = -EINVAL; + break; + } + } while (!__igt_timeout(end_time, NULL)); + +out_unpin: + i915_gem_object_unpin_map(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + int intel_memory_region_mock_selftests(void) { static const struct i915_subtest tests[] = { @@ -280,3 +605,22 @@ out_unref: drm_dev_put(&i915->drm); return err; } + +int intel_memory_region_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_lmem_create), + SUBTEST(igt_lmem_write_cpu), + SUBTEST(igt_lmem_write_gpu), + }; + + if (!HAS_LMEM(i915)) { + pr_info("device lacks LMEM support, skipping\n"); + return 0; + } + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return i915_live_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 0ffb141eb988..0e4e6be0101d 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -140,19 +140,19 @@ static int live_forcewake_ops(void *arg) } }; const struct reg *r; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_uncore_forcewake_domain *domain; - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; unsigned int tmp; int err = 0; - GEM_BUG_ON(i915->gt.awake); + GEM_BUG_ON(gt->awake); /* vlv/chv with their pcu behave differently wrt reads */ - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) { pr_debug("PCU fakes forcewake badly; skipping\n"); return 0; } @@ -170,15 +170,15 @@ static int live_forcewake_ops(void *arg) /* We have to pick carefully to get the exact behaviour we need */ for (r = 
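intel_memory_region_live_selftests() at the bottom of this file follows the usual gating for live tests: bail out quietly when the hardware cannot run them at all. The same shape, compressed (test list hypothetical):

int example_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
	};

	if (!HAS_LMEM(i915))
		return 0; /* skipping is not a failure on !LMEM parts */

	if (intel_gt_is_wedged(&i915->gt))
		return 0; /* nothing to learn from a dead GPU */

	return i915_live_subtests(tests, i915);
}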
registers; r->name; r++) - if (r->platforms & INTEL_INFO(i915)->gen_mask) + if (r->platforms & INTEL_INFO(gt->i915)->gen_mask) break; if (!r->name) { pr_debug("Forcewaked register not known for %s; skipping\n", - intel_platform_name(INTEL_INFO(i915)->platform)); + intel_platform_name(INTEL_INFO(gt->i915)->platform)); return 0; } - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_runtime_pm_get(uncore->rpm); for_each_fw_domain(domain, uncore, tmp) { smp_store_mb(domain->active, false); @@ -188,7 +188,7 @@ static int live_forcewake_ops(void *arg) intel_uncore_fw_release_timer(&domain->timer); } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset); u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset; enum forcewake_domains fw_domains; @@ -249,22 +249,22 @@ static int live_forcewake_ops(void *arg) } out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(uncore->rpm, wakeref); return err; } static int live_forcewake_domains(void *arg) { #define FW_RANGE 0x40000 - struct drm_i915_private *dev_priv = arg; - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = arg; + struct intel_uncore *uncore = gt->uncore; unsigned long *valid; u32 offset; int err; - if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) && - !IS_VALLEYVIEW(dev_priv) && - !IS_CHERRYVIEW(dev_priv)) + if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) && + !IS_VALLEYVIEW(gt->i915) && + !IS_CHERRYVIEW(gt->i915)) return 0; /* @@ -283,7 +283,7 @@ static int live_forcewake_domains(void *arg) for (offset = 0; offset < FW_RANGE; offset += 4) { i915_reg_t reg = { offset }; - (void)I915_READ_FW(reg); + intel_uncore_posting_read_fw(uncore, reg); if (!check_for_unclaimed_mmio(uncore)) set_bit(offset, valid); } @@ -300,7 +300,7 @@ static int live_forcewake_domains(void *arg) check_for_unclaimed_mmio(uncore); - (void)I915_READ(reg); + intel_uncore_posting_read_fw(uncore, reg); if (check_for_unclaimed_mmio(uncore)) { pr_err("Unclaimed mmio read to register 0x%04x\n", offset); @@ -312,21 +312,23 @@ static int live_forcewake_domains(void *arg) return err; } +static int live_fw_table(void *arg) +{ + struct intel_gt *gt = arg; + + /* Confirm the table we load is still valid */ + return intel_fw_table_check(gt->uncore->fw_domains_table, + gt->uncore->fw_domains_table_entries, + INTEL_GEN(gt->i915) >= 9); +} + int intel_uncore_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(live_fw_table), SUBTEST(live_forcewake_ops), SUBTEST(live_forcewake_domains), }; - int err; - - /* Confirm the table we load is still valid */ - err = intel_fw_table_check(i915->uncore.fw_domains_table, - i915->uncore.fw_domains_table_entries, - INTEL_GEN(i915) >= 9); - if (err) - return err; - - return i915_subtests(tests, i915); + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c deleted file mode 100644 index 09c704153456..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_drm.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the 
Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "mock_drm.h" - -struct drm_file *mock_file(struct drm_i915_private *i915) -{ - struct file *filp; - struct inode *inode; - struct drm_file *file; - int err; - - inode = kzalloc(sizeof(*inode), GFP_KERNEL); - if (!inode) { - err = -ENOMEM; - goto err; - } - - inode->i_rdev = i915->drm.primary->index; - - filp = kzalloc(sizeof(*filp), GFP_KERNEL); - if (!filp) { - err = -ENOMEM; - goto err_inode; - } - - err = drm_open(inode, filp); - if (err) - goto err_filp; - - file = filp->private_data; - memset(&file->filp, POISON_INUSE, sizeof(file->filp)); - file->authenticated = true; - - kfree(filp); - kfree(inode); - return file; - -err_filp: - kfree(filp); -err_inode: - kfree(inode); -err: - return ERR_PTR(err); -} - -void mock_file_free(struct drm_i915_private *i915, struct drm_file *file) -{ - struct file filp = { .private_data = file }; - - drm_release(NULL, &filp); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h index b39beee9f8f6..9916b6f95526 100644 --- a/drivers/gpu/drm/i915/selftests/mock_drm.h +++ b/drivers/gpu/drm/i915/selftests/mock_drm.h @@ -25,7 +25,21 @@ #ifndef __MOCK_DRM_H #define __MOCK_DRM_H -struct drm_file *mock_file(struct drm_i915_private *i915); -void mock_file_free(struct drm_i915_private *i915, struct drm_file *file); +#include <drm/drm_file.h> + +#include "i915_drv.h" + +struct drm_file; +struct file; + +static inline struct file *mock_file(struct drm_i915_private *i915) +{ + return mock_drm_getfile(i915->drm.primary, O_RDWR); +} + +static inline struct drm_file *to_drm_file(struct file *f) +{ + return f->private_data; +} #endif /* !__MOCK_DRM_H */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index cb8c3a501cc7..ac641f5360e1 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -28,6 +28,7 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "gt/mock_engine.h" +#include "intel_memory_region.h" #include "mock_request.h" #include "mock_gem_device.h" @@ -40,39 +41,31 @@ void mock_device_flush(struct drm_i915_private *i915) { + struct intel_gt *gt = &i915->gt; struct intel_engine_cs *engine; enum intel_engine_id id; do { - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) mock_engine_flush(engine); - } while (intel_gt_retire_requests_timeout(&i915->gt, - MAX_SCHEDULE_TIMEOUT)); + } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT)); } static void mock_device_release(struct drm_device *dev) { struct drm_i915_private *i915 = to_i915(dev); - struct intel_engine_cs *engine; - enum intel_engine_id id; mock_device_flush(i915); + 
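With mock_drm.c gone, opening a mock client collapses into the inline wrappers now in mock_drm.h; a usage sketch (function name hypothetical):

static int use_mock_client(struct drm_i915_private *i915)
{
	struct file *file;
	struct drm_file *priv;

	file = mock_file(i915); /* mock_drm_getfile(primary, O_RDWR) */
	if (IS_ERR(file))
		return PTR_ERR(file);

	priv = to_drm_file(file); /* the struct drm_file underneath */

	/* ... exercise the uAPI through priv ... */

	fput(file); /* released like any ordinary struct file */
	return 0;
}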
intel_gt_driver_remove(&i915->gt); i915_gem_drain_workqueue(i915); - - for_each_engine(engine, i915, id) - mock_engine_free(engine); - i915_gem_driver_release__contexts(i915); - - intel_timelines_fini(i915); - - drain_workqueue(i915->wq); i915_gem_drain_freed_objects(i915); mock_fini_ggtt(&i915->ggtt); destroy_workqueue(i915->wq); - i915_gem_cleanup_memory_regions(i915); + intel_gt_driver_late_release(&i915->gt); + intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); @@ -164,6 +157,7 @@ struct drm_i915_private *mock_gem_device(void) I915_GTT_PAGE_SIZE_2M; mkwrite_device_info(i915)->memory_regions = REGION_SMEM; + intel_memory_regions_hw_probe(i915); mock_uncore_init(&i915->uncore, i915); @@ -178,9 +172,8 @@ struct drm_i915_private *mock_gem_device(void) mock_init_contexts(i915); - intel_timelines_init(i915); - mock_init_ggtt(i915, &i915->ggtt); + i915->gt.vm = i915_vm_get(&i915->ggtt.vm); mkwrite_device_info(i915)->engine_mask = BIT(0); @@ -188,29 +181,20 @@ struct drm_i915_private *mock_gem_device(void) if (!i915->engine[RCS0]) goto err_unlock; - i915->kernel_context = mock_context(i915, NULL); - if (!i915->kernel_context) - goto err_engine; - if (mock_engine_init(i915->engine[RCS0])) goto err_context; intel_engines_driver_register(i915); - err = i915_gem_init_memory_regions(i915); - if (err) - goto err_context; - return i915; err_context: - i915_gem_driver_release__contexts(i915); -err_engine: - mock_engine_free(i915->engine[RCS0]); + intel_gt_driver_remove(&i915->gt); err_unlock: - intel_timelines_fini(i915); destroy_workqueue(i915->wq); err_drv: + intel_gt_driver_late_release(&i915->gt); + intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); drm_dev_fini(&i915->drm); put_device: diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 173f2d4dbd14..20ac3844edec 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -63,6 +63,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) if (!ppgtt) return NULL; + ppgtt->vm.gt = &i915->gt; ppgtt->vm.i915 = i915; ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); @@ -117,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); - - intel_gt_init_hw_early(i915); + i915->gt.ggtt = ggtt; } void mock_fini_ggtt(struct i915_ggtt *ggtt) diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h index 3387393286de..e3f224f43beb 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.h +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h @@ -25,6 +25,9 @@ #ifndef __MOCK_GTT_H #define __MOCK_GTT_H +struct drm_i915_private; +struct i915_ggtt; + void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt); void mock_fini_ggtt(struct i915_ggtt *ggtt); diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 7b0c99ddc2d5..b2ad41c27e67 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -19,6 +19,7 @@ mock_object_create(struct intel_memory_region *mem, resource_size_t size, unsigned int flags) { + static struct lock_class_key lock_class; struct drm_i915_private *i915 = mem->i915; struct drm_i915_gem_object *obj; @@ -30,7 +31,7 @@ mock_object_create(struct 
intel_memory_region *mem, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &mock_region_obj_ops); + i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h index 24608089d833..329bf74dfaca 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.h +++ b/drivers/gpu/drm/i915/selftests/mock_region.h @@ -6,6 +6,11 @@ #ifndef __MOCK_REGION_H #define __MOCK_REGION_H +#include <linux/types.h> + +struct drm_i915_private; +struct intel_memory_region; + struct intel_memory_region * mock_region_create(struct drm_i915_private *i915, resource_size_t start, diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h index 8a2cc553f466..7acf1ef4d488 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.h +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h @@ -25,6 +25,9 @@ #ifndef __MOCK_UNCORE_H #define __MOCK_UNCORE_H +struct drm_i915_private; +struct intel_uncore; + void mock_uncore_init(struct intel_uncore *uncore, struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 32e94bcb4a69..bb6528b01cd0 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -1067,11 +1067,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, for_each_available_child_of_node(dev->of_node, child) { panel = of_drm_find_panel(child); if (IS_ERR(panel)) { - dev_err(dev, "failed to find panel try bridge (%lu)\n", + dev_err(dev, "failed to find panel try bridge (%ld)\n", PTR_ERR(panel)); + panel = NULL; + bridge = of_drm_find_bridge(child); if (IS_ERR(bridge)) { - dev_err(dev, "failed to find bridge (%lu)\n", + dev_err(dev, "failed to find bridge (%ld)\n", PTR_ERR(bridge)); return PTR_ERR(bridge); } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 14878ebf59d7..4a55bb6e2213 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -3,6 +3,8 @@ * Copyright (c) 2015 MediaTek Inc. */ +#include <drm/drm_fourcc.h> + #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> @@ -50,6 +52,8 @@ OVL_CON_CLRFMT_RGB : 0) #define OVL_CON_AEN BIT(8) #define OVL_CON_ALPHA 0xff +#define OVL_CON_VIRT_FLIP BIT(9) +#define OVL_CON_HORZ_FLIP BIT(10) struct mtk_disp_ovl_data { unsigned int addr; @@ -137,6 +141,40 @@ static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) return ovl->data->layer_nr; } +static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp) +{ + return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; +} + +static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, + struct mtk_plane_state *mtk_state) +{ + struct drm_plane_state *state = &mtk_state->base; + unsigned int rotation = 0; + + rotation = drm_rotation_simplify(state->rotation, + DRM_MODE_ROTATE_0 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); + rotation &= ~DRM_MODE_ROTATE_0; + + /* We can only do reflection, not rotation */ + if ((rotation & DRM_MODE_ROTATE_MASK) != 0) + return -EINVAL; + + /* + * TODO: Rotating/reflecting YUV buffers is not supported at this time. + * Only RGB[AX] variants are supported. 
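drm_rotation_simplify() in mtk_ovl_layer_check() re-expresses the requested rotation purely in terms the OVL hardware supports, which is why any surviving DRM_MODE_ROTATE_* bit is grounds for rejection. A worked example:

	/* a 180 degree request decomposes into the two reflections */
	unsigned int r = drm_rotation_simplify(DRM_MODE_ROTATE_180,
					       DRM_MODE_ROTATE_0 |
					       DRM_MODE_REFLECT_X |
					       DRM_MODE_REFLECT_Y);
	/* r == DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y */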
+ */ + if (state->fb->format->is_yuv && rotation != 0) + return -EINVAL; + + state->rotation = rotation; + + return 0; +} + static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { unsigned int reg; @@ -229,6 +267,16 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, if (idx != 0) con |= OVL_CON_AEN | OVL_CON_ALPHA; + if (pending->rotation & DRM_MODE_REFLECT_Y) { + con |= OVL_CON_VIRT_FLIP; + addr += (pending->height - 1) * pending->pitch; + } + + if (pending->rotation & DRM_MODE_REFLECT_X) { + con |= OVL_CON_HORZ_FLIP; + addr += pending->pitch - 1; + } + writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx)); writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); @@ -263,9 +311,11 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .stop = mtk_ovl_stop, .enable_vblank = mtk_ovl_enable_vblank, .disable_vblank = mtk_ovl_disable_vblank, + .supported_rotations = mtk_ovl_supported_rotations, .layer_nr = mtk_ovl_layer_nr, .layer_on = mtk_ovl_layer_on, .layer_off = mtk_ovl_layer_off, + .layer_check = mtk_ovl_layer_check, .layer_config = mtk_ovl_layer_config, .bgclr_in_on = mtk_ovl_bgclr_in_on, .bgclr_in_off = mtk_ovl_bgclr_in_off, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index b841d3706d8b..f80a8ba75977 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -207,6 +207,28 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk); } +static +struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, + struct drm_plane *plane, + unsigned int *local_layer) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp; + int i, count = 0; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + comp = mtk_crtc->ddp_comp[i]; + if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = plane->index - count; + return comp; + } + count += mtk_ddp_comp_layer_nr(comp); + } + + WARN(1, "Failed to find component for plane %d\n", plane->index); + return NULL; +} + static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; @@ -283,19 +305,12 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - unsigned int comp_layer_nr = mtk_ddp_comp_layer_nr(comp); + struct mtk_ddp_comp *comp; unsigned int local_layer; plane_state = to_mtk_plane_state(plane->state); - - if (i >= comp_layer_nr) { - comp = mtk_crtc->ddp_comp[1]; - local_layer = i - comp_layer_nr; - } else - local_layer = i; - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state); + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); + mtk_ddp_comp_layer_config(comp, local_layer, plane_state); } return 0; @@ -343,7 +358,6 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; unsigned int i; - unsigned int comp_layer_nr = mtk_ddp_comp_layer_nr(comp); unsigned int local_layer; /* @@ -366,22 +380,30 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) plane_state = to_mtk_plane_state(plane->state); - if (plane_state->pending.config) { - if 
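The base-address adjustments in mtk_ovl_layer_config() are what make the flip bits scan out the right pixels: a vertical flip starts the fetch at the last line, a horizontal flip at the end of the first line. Worked through for a hypothetical 1920x1080 XRGB8888 plane (pitch = 1920 * 4 = 7680 bytes):

	/* DRM_MODE_REFLECT_Y: begin at the final line */
	addr += (1080 - 1) * 7680; /* +8,286,720 bytes */

	/* DRM_MODE_REFLECT_X: begin at the last byte of the first line */
	addr += 7680 - 1;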
(i >= comp_layer_nr) { - comp = mtk_crtc->ddp_comp[1]; - local_layer = i - comp_layer_nr; - } else - local_layer = i; - - mtk_ddp_comp_layer_config(comp, local_layer, - plane_state); - plane_state->pending.config = false; - } + if (!plane_state->pending.config) + continue; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, + &local_layer); + + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state); + plane_state->pending.config = false; } mtk_crtc->pending_planes = false; } } +int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state) +{ + unsigned int local_layer; + struct mtk_ddp_comp *comp; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); + return mtk_ddp_comp_layer_check(comp, local_layer, state); +} + static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -543,14 +565,65 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) mtk_drm_finish_page_flip(mtk_crtc); } +static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc, + int comp_idx) +{ + struct mtk_ddp_comp *comp; + + if (comp_idx > 1) + return 0; + + comp = mtk_crtc->ddp_comp[comp_idx]; + if (!comp->funcs) + return 0; + + if (comp_idx == 1 && !comp->funcs->bgclr_in_on) + return 0; + + return mtk_ddp_comp_layer_nr(comp); +} + +static inline +enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx) +{ + if (plane_idx == 0) + return DRM_PLANE_TYPE_PRIMARY; + else if (plane_idx == 1) + return DRM_PLANE_TYPE_CURSOR; + else + return DRM_PLANE_TYPE_OVERLAY; + +} + +static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev, + struct mtk_drm_crtc *mtk_crtc, + int comp_idx, int pipe) +{ + int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx]; + int i, ret; + + for (i = 0; i < num_planes; i++) { + ret = mtk_plane_init(drm_dev, + &mtk_crtc->planes[mtk_crtc->layer_nr], + BIT(pipe), + mtk_drm_crtc_plane_type(mtk_crtc->layer_nr), + mtk_ddp_comp_supported_rotations(comp)); + if (ret) + return ret; + + mtk_crtc->layer_nr++; + } + return 0; +} + int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len) { struct mtk_drm_private *priv = drm_dev->dev_private; struct device *dev = drm_dev->dev; struct mtk_drm_crtc *mtk_crtc; - enum drm_plane_type type; - unsigned int zpos; + unsigned int num_comp_planes = 0; int pipe = priv->num_pipes; int ret; int i; @@ -606,23 +679,15 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp[i] = comp; } - mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); - if (mtk_crtc->ddp_comp_nr > 1) { - struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[1]; + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i); - if (comp->funcs->bgclr_in_on) - mtk_crtc->layer_nr += mtk_ddp_comp_layer_nr(comp); - } - mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr, - sizeof(struct drm_plane), - GFP_KERNEL); - - for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { - type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : - (zpos == 1) ? 
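mtk_drm_ddp_comp_for_plane() replaces the hard-coded two-component split with a running count across the whole path: for a hypothetical path of OVL0 (4 layers) followed by OVL1 (2 layers), plane index 0..3 maps to OVL0 with local_layer = index, and plane index 4..5 maps to OVL1 with local_layer = index - 4. The arithmetic in isolation:

static int plane_to_local_layer(const unsigned int *layers, int ncomp,
				unsigned int plane_idx,
				unsigned int *local_layer)
{
	unsigned int count = 0;
	int i;

	for (i = 0; i < ncomp; i++) {
		if (plane_idx < count + layers[i]) {
			*local_layer = plane_idx - count;
			return i; /* index of the owning component */
		}
		count += layers[i];
	}
	return -EINVAL;
}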
DRM_PLANE_TYPE_CURSOR : - DRM_PLANE_TYPE_OVERLAY; - ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos], - BIT(pipe), type); + mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, + sizeof(struct drm_plane), GFP_KERNEL); + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i, + pipe); if (ret) return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index fcc134eb00c9..6afe1c19557a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -19,5 +19,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len); +int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state); #endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 26441f4d1ad3..2f1e9e75b8da 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -77,9 +77,13 @@ struct mtk_ddp_comp_funcs { void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); + unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); + int (*layer_check)(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); void (*gamma_set)(struct mtk_ddp_comp *comp, @@ -130,6 +134,15 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) comp->funcs->disable_vblank(comp); } +static inline +unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->supported_rotations) + return comp->funcs->supported_rotations(comp); + + return 0; +} + static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) { if (comp->funcs && comp->funcs->layer_nr) @@ -152,6 +165,15 @@ static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, comp->funcs->layer_off(comp, idx); } +static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state) +{ + if (comp->funcs && comp->funcs->layer_check) + return comp->funcs->layer_check(comp, idx, state); + return 0; +} + static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index e8a3493475cd..f0b0325381e0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -19,6 +19,12 @@ static const u32 formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, @@ -83,6 +89,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, { struct drm_framebuffer *fb = state->fb; struct drm_crtc_state *crtc_state; + int ret; if (!fb) return 0; @@ -90,6 +97,11 @@ 
static int mtk_plane_atomic_check(struct drm_plane *plane, if (WARN_ON(!state->crtc)) return 0; + ret = mtk_drm_crtc_plane_check(state->crtc, plane, + to_mtk_plane_state(state)); + if (ret) + return ret; + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -131,6 +143,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.y = plane->state->dst.y1; state->pending.width = drm_rect_width(&plane->state->dst); state->pending.height = drm_rect_height(&plane->state->dst); + state->pending.rotation = plane->state->rotation; wmb(); /* Make sure the above parameters are set before update */ state->pending.dirty = true; } @@ -153,7 +166,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { }; int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type) + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations) { int err; @@ -165,6 +179,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, return err; } + if (supported_rotations & ~DRM_MODE_ROTATE_0) { + err = drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, + supported_rotations); + if (err) + DRM_INFO("Create rotation property failed\n"); + } + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 6f842df722c7..760885e35b27 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -20,6 +20,7 @@ struct mtk_plane_pending_state { unsigned int y; unsigned int width; unsigned int height; + unsigned int rotation; bool dirty; }; @@ -35,6 +36,7 @@ to_mtk_plane_state(struct drm_plane_state *state) } int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type); + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations); #endif diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index 9ab27aecfcf3..1bd6b6d15ffb 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { }, }; +static const struct meson_cvbs_mode * +meson_cvbs_get_mode(const struct drm_display_mode *req_mode) +{ + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + if (drm_mode_match(req_mode, &meson_mode->mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return meson_mode; + } + + return NULL; +} + /* Connector */ static void meson_cvbs_connector_destroy(struct drm_connector *connector) @@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode)) - return 0; - } + if (meson_cvbs_get_mode(&crtc_state->mode)) + return 0; return -EINVAL; } @@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { 
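Switching meson_venc_cvbs from drm_mode_equal() to drm_mode_match() makes the comparison criteria explicit; roughly, drm_mode_equal() is the same call with DRM_MODE_MATCH_ASPECT_RATIO added, so the new code deliberately stops treating a differing picture aspect ratio as a mismatch:

	/* drm_mode_equal(a, b) is approximately: */
	drm_mode_match(a, b, DRM_MODE_MATCH_TIMINGS |
			     DRM_MODE_MATCH_CLOCK |
			     DRM_MODE_MATCH_FLAGS |
			     DRM_MODE_MATCH_3D_FLAGS |
			     DRM_MODE_MATCH_ASPECT_RATIO);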
+ const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode); struct meson_venc_cvbs *meson_venc_cvbs = encoder_to_meson_venc_cvbs(encoder); struct meson_drm *priv = meson_venc_cvbs->priv; - int i; - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + if (meson_mode) { + meson_venci_cvbs_mode_set(priv, meson_mode->enci); - if (drm_mode_equal(mode, &meson_mode->mode)) { - meson_venci_cvbs_mode_set(priv, - meson_mode->enci); - - /* Setup 27MHz vclk2 for ENCI and VDAC */ - meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - MESON_VCLK_CVBS, true); - break; - } + /* Setup 27MHz vclk2 for ENCI and VDAC */ + meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, true); } } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index ef795ab79330..9f4f5f071add 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -30,7 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400); static struct drm_driver driver; static const struct pci_device_id pciidlist[] = { - { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A }, + { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD}, { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B }, { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV }, { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB }, @@ -94,6 +95,35 @@ static void mga_pci_remove(struct pci_dev *pdev) DEFINE_DRM_GEM_FOPS(mgag200_driver_fops); +static bool mgag200_pin_bo_at_0(const struct mga_device *mdev) +{ + return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD; +} + +int mgag200_driver_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct mga_device *mdev = dev->dev_private; + unsigned long pg_align; + + if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized")) + return -EINVAL; + + pg_align = 0ul; + + /* + * Aligning scanout buffers to the size of the video ram forces + * placement at offset 0. Works around a bug where HW does not + * respect 'startadd' field. + */ + if (mgag200_pin_bo_at_0(mdev)) + pg_align = PFN_UP(mdev->mc.vram_size); + + return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, + pg_align, false, args); +} + static struct drm_driver driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, .fops = &mgag200_driver_fops, @@ -103,7 +133,10 @@ static struct drm_driver driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - DRM_GEM_VRAM_DRIVER + .debugfs_init = drm_vram_mm_debugfs_init, + .dumb_create = mgag200_driver_dumb_create, + .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, + .gem_prime_mmap = drm_gem_prime_mmap, }; static struct pci_driver mgag200_pci_driver = { diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 0ea9a525e57d..aa32aad222c2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -150,6 +150,12 @@ enum mga_type { G200_EW3, }; +/* HW does not handle 'startadd' field correct. 
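The pg_align trick in mgag200_driver_dumb_create() above is a pigeonhole argument rather than a placement flag: with 4 KiB pages and, say, 16 MiB of VRAM, PFN_UP(mdev->mc.vram_size) is 4096 pages, and the only 4096-page-aligned offset inside a 4096-page region is 0. The scanout buffer therefore always lands at the start of VRAM, and the broken 'startadd' field never needs a non-zero value.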
*/ +#define MGAG200_FLAG_HW_BUG_NO_STARTADD (1ul << 8) + +#define MGAG200_TYPE_MASK (0x000000ff) +#define MGAG200_FLAG_MASK (0x00ffff00) + #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B) struct mga_device { @@ -181,6 +187,18 @@ struct mga_device { u32 unique_rev_id; }; +static inline enum mga_type +mgag200_type_from_driver_data(kernel_ulong_t driver_data) +{ + return (enum mga_type)(driver_data & MGAG200_TYPE_MASK); +} + +static inline unsigned long +mgag200_flags_from_driver_data(kernel_ulong_t driver_data) +{ + return driver_data & MGAG200_FLAG_MASK; +} + /* mgag200_mode.c */ int mgag200_modeset_init(struct mga_device *mdev); void mgag200_modeset_fini(struct mga_device *mdev); diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 470fa641451a..e278b6a547bd 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -95,7 +95,8 @@ static int mgag200_device_init(struct drm_device *dev, struct mga_device *mdev = dev->dev_private; int ret, option; - mdev->type = flags; + mdev->flags = mgag200_flags_from_driver_data(flags); + mdev->type = mgag200_type_from_driver_data(flags); /* Hardcode the number of CRTCs to 1 */ mdev->num_crtc = 1; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index e9160ce39cbb..6deaa7d01654 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -7,6 +7,7 @@ config DRM_MSM depends on OF && COMMON_CLK depends on MMU depends on INTERCONNECT || !INTERCONNECT + depends on QCOM_OCMEM || QCOM_OCMEM=n select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 5f7e98028eaf..7ad14937fcdf 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -6,10 +6,6 @@ * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
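The PCI driver_data word now carries both the chip type (low byte) and feature flags (bits 8-23); decoding is just the two mask helpers:

	kernel_ulong_t data = G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD;

	/* type  == G200_SE_A, from data & MGAG200_TYPE_MASK */
	enum mga_type type = mgag200_type_from_driver_data(data);
	/* flags == MGAG200_FLAG_HW_BUG_NO_STARTADD, from data & MGAG200_FLAG_MASK */
	unsigned long flags = mgag200_flags_from_driver_data(data);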
*/ -#ifdef CONFIG_MSM_OCMEM -# include <mach/ocmem.h> -#endif - #include "a3xx_gpu.h" #define A3XX_INT0_MASK \ @@ -195,9 +191,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); /* Set the OCMEM base address for A330, etc */ - if (a3xx_gpu->ocmem_hdl) { + if (a3xx_gpu->ocmem.hdl) { gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, - (unsigned int)(a3xx_gpu->ocmem_base >> 14)); + (unsigned int)(a3xx_gpu->ocmem.base >> 14)); } /* Turn on performance counters: */ @@ -318,10 +314,7 @@ static void a3xx_destroy(struct msm_gpu *gpu) adreno_gpu_cleanup(adreno_gpu); -#ifdef CONFIG_MSM_OCMEM - if (a3xx_gpu->ocmem_base) - ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl); -#endif + adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem); kfree(a3xx_gpu); } @@ -494,17 +487,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) /* if needed, allocate gmem: */ if (adreno_is_a330(adreno_gpu)) { -#ifdef CONFIG_MSM_OCMEM - /* TODO this is different/missing upstream: */ - struct ocmem_buf *ocmem_hdl = - ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); - - a3xx_gpu->ocmem_hdl = ocmem_hdl; - a3xx_gpu->ocmem_base = ocmem_hdl->addr; - adreno_gpu->gmem = ocmem_hdl->len; - DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, - a3xx_gpu->ocmem_base); -#endif + ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev, + adreno_gpu, &a3xx_gpu->ocmem); + if (ret) + goto fail; } if (!gpu->aspace) { diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index 5dc33e5ea53b..c555fb13e0d7 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -19,8 +19,7 @@ struct a3xx_gpu { struct adreno_gpu base; /* if OCMEM is used for GMEM: */ - uint32_t ocmem_base; - void *ocmem_hdl; + struct adreno_ocmem ocmem; }; #define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index ab2b752566d8..b01388a9e89e 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -2,9 +2,6 @@ /* Copyright (c) 2014 The Linux Foundation. All rights reserved. 
*/ #include "a4xx_gpu.h" -#ifdef CONFIG_MSM_OCMEM -# include <soc/qcom/ocmem.h> -#endif #define A4XX_INT0_MASK \ (A4XX_INT0_RBBM_AHB_ERROR | \ @@ -188,7 +185,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu) (1 << 30) | 0xFFFF); gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR, - (unsigned int)(a4xx_gpu->ocmem_base >> 14)); + (unsigned int)(a4xx_gpu->ocmem.base >> 14)); /* Turn on performance counters: */ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01); @@ -318,10 +315,7 @@ static void a4xx_destroy(struct msm_gpu *gpu) adreno_gpu_cleanup(adreno_gpu); -#ifdef CONFIG_MSM_OCMEM - if (a4xx_gpu->ocmem_base) - ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl); -#endif + adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem); kfree(a4xx_gpu); } @@ -578,17 +572,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) /* if needed, allocate gmem: */ if (adreno_is_a4xx(adreno_gpu)) { -#ifdef CONFIG_MSM_OCMEM - /* TODO this is different/missing upstream: */ - struct ocmem_buf *ocmem_hdl = - ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem); - - a4xx_gpu->ocmem_hdl = ocmem_hdl; - a4xx_gpu->ocmem_base = ocmem_hdl->addr; - adreno_gpu->gmem = ocmem_hdl->len; - DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024, - a4xx_gpu->ocmem_base); -#endif + ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, + &a4xx_gpu->ocmem); + if (ret) + goto fail; } if (!gpu->aspace) { diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h index d506311ee240..a01448cba2ea 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h @@ -16,8 +16,7 @@ struct a4xx_gpu { struct adreno_gpu base; /* if OCMEM is used for GMEM: */ - uint32_t ocmem_base; - void *ocmem_hdl; + struct adreno_ocmem ocmem; }; #define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e9c55d1d6c04..b02e2042547f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -353,6 +353,9 @@ static int a5xx_me_init(struct msm_gpu *gpu) * 2D mode 3 draw */ OUT_RING(ring, 0x0000000B); + } else if (adreno_is_a510(adreno_gpu)) { + /* Workaround for token and syncs */ + OUT_RING(ring, 0x00000001); } else { /* No workarounds enabled */ OUT_RING(ring, 0x00000000); @@ -568,15 +571,24 @@ static int a5xx_hw_init(struct msm_gpu *gpu) 0x00100000 + adreno_gpu->gmem - 1); gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); - gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); - if (adreno_is_a530(adreno_gpu)) - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); - if (adreno_is_a540(adreno_gpu)) - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); - gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); - gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); - - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); + if (adreno_is_a510(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x200 << 11 | 0x200 << 22)); + } else { + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); + if (adreno_is_a530(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); + if (adreno_is_a540(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); + gpu_write(gpu, 
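a3xx and a4xx now share one OCMEM bring-up/teardown pair instead of two open-coded #ifdef blocks; the probe-side pattern, as used for the A330 (identical for a4xx apart from the device pointer passed in):

	if (adreno_is_a330(adreno_gpu)) {
		ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
					    adreno_gpu, &a3xx_gpu->ocmem);
		if (ret)
			goto fail;
	}

	/* and on destroy, unconditionally: */
	adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem); /* no-op if never initialised */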
REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x400 << 11 | 0x300 << 22)); + } if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); @@ -589,6 +601,19 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* Enable ME/PFP split notification */ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); + /* + * In A5x, CCU can send context_done event of a particular context to + * UCHE which ultimately reaches CP even when there is valid + * transaction of that context inside CCU. This can let CP to program + * config registers, which will make the "valid transaction" inside + * CCU to be interpreted differently. This can cause gpu fault. This + * bug is fixed in latest A510 revision. To enable this bug fix - + * bit[11] of RB_DBG_ECO_CNTL need to be set to 0, default is 1 + * (disable). For older A510 version this bit is unused. + */ + if (adreno_is_a510(adreno_gpu)) + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0); + /* Enable HWCG */ a5xx_set_hwcg(gpu, true); @@ -635,7 +660,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* UCHE */ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); - if (adreno_is_a530(adreno_gpu)) + if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_PROTECT(17), ADRENO_PROTECT_RW(0x10000, 0x8000)); @@ -679,7 +704,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) a5xx_preempt_hw_init(gpu); - a5xx_gpmu_ucode_init(gpu); + if (!adreno_is_a510(adreno_gpu)) + a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); if (ret) @@ -712,7 +738,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) } /* - * Try to load a zap shader into the secure world. If successful + * If the chip that we are using does support loading one, then + * try to load a zap shader into the secure world. If successful * we can use the CP to switch out of secure mode. If not then we * have no resource but to try to switch ourselves out manually. 
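gpu_rmw(gpu, reg, mask, or) reads the register, clears the bits in 'mask', then ORs in 'or', so the two nearby uses do opposite jobs with the same helper:

	/* set bit 8: clear nothing, OR the quirk bit in */
	gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	/* clear bit 11: mask it out, OR nothing back (enables the CCU fix) */
	gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);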
If we * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will @@ -1066,6 +1093,7 @@ static void a5xx_dump(struct msm_gpu *gpu) static int a5xx_pm_resume(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int ret; /* Turn on the core power */ @@ -1073,6 +1101,15 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) if (ret) return ret; + if (adreno_is_a510(adreno_gpu)) { + /* Halt the sp_input_clk at HM level */ + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); + a5xx_set_hwcg(gpu, true); + /* Turn on sp_input_clk at HM level */ + gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0); + return 0; + } + /* Turn the RBCCU domain first to limit the chances of voltage droop */ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); @@ -1101,9 +1138,17 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) static int a5xx_pm_suspend(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + u32 mask = 0xf; + + /* A510 has 3 XIN ports in VBIF */ + if (adreno_is_a510(adreno_gpu)) + mask = 0x7; + /* Clear the VBIF pipe before shutting down */ - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); - spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF); + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask); + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & + mask) == mask); gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); @@ -1289,7 +1334,7 @@ static void a5xx_gpu_state_destroy(struct kref *kref) kfree(a5xx_state); } -int a5xx_gpu_state_put(struct msm_gpu_state *state) +static int a5xx_gpu_state_put(struct msm_gpu_state *state) { if (IS_ERR_OR_NULL(state)) return 1; @@ -1299,8 +1344,8 @@ int a5xx_gpu_state_put(struct msm_gpu_state *state) #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) -void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, - struct drm_printer *p) +static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p) { int i, j; u32 pos = 0; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index a3a06db675ba..321a8061fd32 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -297,6 +297,10 @@ int a5xx_power_init(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int ret; + /* Not all A5xx chips have a GPMU */ + if (adreno_is_a510(adreno_gpu)) + return 0; + /* Set up the limits management */ if (adreno_is_a530(adreno_gpu)) a530_lm_setup(gpu); @@ -326,6 +330,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) unsigned int *data, *ptr, *cmds; unsigned int cmds_size; + if (adreno_is_a510(adreno_gpu)) + return; + if (a5xx_gpu->gpmu_bo) return; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e686331fa089..691c1a277d91 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, cxdbg = ioremap(res->start, resource_size(res)); if (cxdbg) { - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT, A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM, A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0); - cxdbg_write(cxdbg, 
REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); } a6xx_state->debugbus = state_kcalloc(a6xx_state, diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 0888e0df660d..fbbdf86504f5 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -115,6 +115,21 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a4xx_gpu_init, }, { + .rev = ADRENO_REV(5, 1, 0, ANY_ID), + .revn = 510, + .name = "A510", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = SZ_256K, + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .init = a5xx_gpu_init, + }, { .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 048c8be426f3..0783e4b5486a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -14,6 +14,7 @@ #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/soc/qcom/mdt_loader.h> +#include <soc/qcom/ocmem.h> #include "adreno_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" @@ -893,6 +894,45 @@ static int adreno_get_pwrlevels(struct device *dev, return 0; } +int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, + struct adreno_ocmem *adreno_ocmem) +{ + struct ocmem_buf *ocmem_hdl; + struct ocmem *ocmem; + + ocmem = of_get_ocmem(dev); + if (IS_ERR(ocmem)) { + if (PTR_ERR(ocmem) == -ENODEV) { + /* + * Return success since either the ocmem property was + * not specified in device tree, or ocmem support is + * not compiled into the kernel. 
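One subtlety in the error handling of adreno_gpu_ocmem_init() here: -ENODEV is the only error treated as success, covering both a device tree without an ocmem phandle and OCMEM support compiled out, while anything else (e.g. -EPROBE_DEFER while the OCMEM driver is still binding) propagates so the GPU probe is retried later. Condensed:

	ocmem = of_get_ocmem(dev);
	if (IS_ERR(ocmem))
		return PTR_ERR(ocmem) == -ENODEV ? 0 : PTR_ERR(ocmem);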
+ */ + return 0; + } + + return PTR_ERR(ocmem); + } + + ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem); + if (IS_ERR(ocmem_hdl)) + return PTR_ERR(ocmem_hdl); + + adreno_ocmem->ocmem = ocmem; + adreno_ocmem->base = ocmem_hdl->addr; + adreno_ocmem->hdl = ocmem_hdl; + adreno_gpu->gmem = ocmem_hdl->len; + + return 0; +} + +void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem) +{ + if (adreno_ocmem && adreno_ocmem->base) + ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, + adreno_ocmem->hdl); +} + int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs, int nr_rings) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index c7441fb8313e..e71a7570ef72 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -126,6 +126,12 @@ struct adreno_gpu { }; #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) +struct adreno_ocmem { + struct ocmem *ocmem; + unsigned long base; + void *hdl; +}; + /* platform config data (ie. from DT, or pdata) */ struct adreno_platform_config { struct adreno_rev rev; @@ -206,6 +212,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu) return gpu->revn == 430; } +static inline int adreno_is_a510(struct adreno_gpu *gpu) +{ + return gpu->revn == 510; +} + static inline int adreno_is_a530(struct adreno_gpu *gpu) { return gpu->revn == 530; @@ -236,6 +247,10 @@ void adreno_dump(struct msm_gpu *gpu); void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords); struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu); +int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, + struct adreno_ocmem *ocmem); +void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem); + int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, int nr_rings); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c index cdbea38b8697..f1bc6a1af7a7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c @@ -55,8 +55,7 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx) int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms, enum dpu_intr_type intr_type, u32 instance_idx) { - if (!dpu_kms || !dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.irq_idx_lookup) + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup) return -EINVAL; return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type, @@ -73,7 +72,7 @@ static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx) unsigned long irq_flags; int ret = 0, enable_count; - if (!dpu_kms || !dpu_kms->hw_intr || + if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts || !dpu_kms->irq_obj.irq_counts) { DPU_ERROR("invalid params\n"); @@ -114,7 +113,7 @@ int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count) { int i, ret = 0, counts; - if (!dpu_kms || !irq_idxs || !irq_count) { + if (!irq_idxs || !irq_count) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -138,7 +137,7 @@ static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx) { int ret = 0, enable_count; - if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) { + if (!dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -169,7 +168,7 @@ int 
dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count) { int i, ret = 0, counts; - if (!dpu_kms || !irq_idxs || !irq_count) { + if (!irq_idxs || !irq_count) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -186,7 +185,7 @@ int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count) u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) { - if (!dpu_kms || !dpu_kms->hw_intr || + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.get_interrupt_status) return 0; @@ -205,7 +204,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, { unsigned long irq_flags; - if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) { + if (!dpu_kms->irq_obj.irq_cb_tbl) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -240,7 +239,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, { unsigned long irq_flags; - if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) { + if (!dpu_kms->irq_obj.irq_cb_tbl) { DPU_ERROR("invalid params\n"); return -EINVAL; } @@ -274,8 +273,7 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) { - if (!dpu_kms || !dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.clear_all_irqs) + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs) return; dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr); @@ -283,8 +281,7 @@ static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) { - if (!dpu_kms || !dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.disable_all_irqs) + if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs) return; dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr); @@ -343,18 +340,8 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) { - struct msm_drm_private *priv; int i; - if (!dpu_kms->dev) { - DPU_ERROR("invalid drm device\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid device private\n"); - return; - } - priv = dpu_kms->dev->dev_private; - pm_runtime_get_sync(&dpu_kms->pdev->dev); dpu_clear_all_irqs(dpu_kms); dpu_disable_all_irqs(dpu_kms); @@ -379,18 +366,8 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) { - struct msm_drm_private *priv; int i; - if (!dpu_kms->dev) { - DPU_ERROR("invalid drm device\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid device private\n"); - return; - } - priv = dpu_kms->dev->dev_private; - pm_runtime_get_sync(&dpu_kms->pdev->dev); for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) || diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 09a49b59bb5b..11f2bebe3869 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -32,18 +32,7 @@ enum dpu_perf_mode { static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv; - - if (!crtc->dev || !crtc->dev->dev_private) { - DPU_ERROR("invalid device\n"); - return NULL; - } - priv = crtc->dev->dev_private; - if (!priv || !priv->kms) { - DPU_ERROR("invalid kms\n"); - return NULL; - } - return to_dpu_kms(priv->kms); } @@ -116,7 +105,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, } kms = _dpu_crtc_get_kms(crtc); - if (!kms || !kms->catalog) { + if (!kms->catalog) { 
DPU_ERROR("invalid parameters\n"); return 0; } @@ -215,7 +204,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc; - struct dpu_crtc_state *dpu_cstate; struct dpu_kms *kms; if (!crtc) { @@ -224,13 +212,12 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc) } kms = _dpu_crtc_get_kms(crtc); - if (!kms || !kms->catalog) { + if (!kms->catalog) { DPU_ERROR("invalid kms\n"); return; } dpu_crtc = to_dpu_crtc(crtc); - dpu_cstate = to_dpu_crtc_state(crtc->state); if (atomic_dec_return(&kms->bandwidth_ref) > 0) return; @@ -287,7 +274,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, u64 clk_rate = 0; struct dpu_crtc *dpu_crtc; struct dpu_crtc_state *dpu_cstate; - struct msm_drm_private *priv; struct dpu_kms *kms; int ret; @@ -297,11 +283,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc, } kms = _dpu_crtc_get_kms(crtc); - if (!kms || !kms->catalog) { + if (!kms->catalog) { DPU_ERROR("invalid kms\n"); return -EINVAL; } - priv = kms->dev->dev_private; dpu_crtc = to_dpu_crtc(crtc); dpu_cstate = to_dpu_crtc_state(crtc->state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index ce59adff06aa..f197dce54576 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -266,11 +266,20 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; - if (!crtc || !crtc->dev) { + if (!crtc) { DPU_ERROR("invalid crtc\n"); return INTF_MODE_NONE; } + /* + * TODO: This function is called from dpu debugfs and as part of atomic + * check. When called from debugfs, the crtc->mutex must be held to + * read crtc->state. However reading crtc->state from atomic check isn't + * allowed (unless you have a good reason, a big comment, and a deep + * understanding of how the atomic/modeset locks work (<- and this is + * probably not possible)). So we'll keep the WARN_ON here for now, but + * really we need to figure out a better way to track our operating mode + */ WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); /* TODO: Returns the first INTF_MODE, could there be multiple values? 
*/ @@ -694,7 +703,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, unsigned long flags; bool release_bandwidth = false; - if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) { + if (!crtc || !crtc->state) { DPU_ERROR("invalid crtc\n"); return; } @@ -766,7 +775,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, struct msm_drm_private *priv; bool request_bandwidth; - if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + if (!crtc) { DPU_ERROR("invalid crtc\n"); return; } @@ -1288,13 +1297,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, { struct drm_crtc *crtc = NULL; struct dpu_crtc *dpu_crtc = NULL; - struct msm_drm_private *priv = NULL; - struct dpu_kms *kms = NULL; int i; - priv = dev->dev_private; - kms = to_dpu_kms(priv->kms); - dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); if (!dpu_crtc) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index d82ea994063f..f96e142c4361 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -645,11 +645,6 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } - hw_mdptop = dpu_kms->hw_mdp; if (!hw_mdptop) { DPU_ERROR("invalid mdptop\n"); @@ -735,8 +730,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, struct msm_drm_private *priv; bool is_vid_mode = false; - if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private || - !drm_enc->crtc) { + if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) { DPU_ERROR("invalid parameters\n"); return -EINVAL; } @@ -1092,17 +1086,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { + if (!drm_enc || !drm_enc->dev) { DPU_ERROR("invalid parameters\n"); return; } priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } dpu_enc = to_dpu_encoder_virt(drm_enc); if (!dpu_enc || !dpu_enc->cur_master) { @@ -1184,7 +1174,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct dpu_encoder_virt *dpu_enc = NULL; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct drm_display_mode *mode; int i = 0; if (!drm_enc) { @@ -1193,9 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) } else if (!drm_enc->dev) { DPU_ERROR("invalid dev\n"); return; - } else if (!drm_enc->dev->dev_private) { - DPU_ERROR("invalid dev_private\n"); - return; } dpu_enc = to_dpu_encoder_virt(drm_enc); @@ -1204,8 +1190,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) mutex_lock(&dpu_enc->enc_lock); dpu_enc->enabled = false; - mode = &drm_enc->crtc->state->adjusted_mode; - priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); @@ -1734,8 +1718,7 @@ static void dpu_encoder_vsync_event_handler(struct timer_list *t) struct msm_drm_private *priv; struct msm_drm_thread *event_thread; - if (!drm_enc->dev || !drm_enc->dev->dev_private || - !drm_enc->crtc) { + if (!drm_enc->dev || !drm_enc->crtc) { DPU_ERROR("invalid parameters\n"); return; } @@ -1914,8 +1897,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode, static int _dpu_encoder_init_debugfs(struct drm_encoder 
*drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); - struct msm_drm_private *priv; - struct dpu_kms *dpu_kms; int i; static const struct file_operations debugfs_status_fops = { @@ -1927,14 +1908,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) char name[DPU_NAME_SIZE]; - if (!drm_enc->dev || !drm_enc->dev->dev_private) { + if (!drm_enc->dev) { DPU_ERROR("invalid encoder or kms\n"); return -EINVAL; } - priv = drm_enc->dev->dev_private; - dpu_kms = to_dpu_kms(priv->kms); - snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id); /* create overall sub-directory for the encoder */ @@ -2042,9 +2020,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, enum dpu_intf_type intf_type; struct dpu_enc_phys_init_params phys_params; - if (!dpu_enc || !dpu_kms) { - DPU_ERROR("invalid arg(s), enc %d kms %d\n", - dpu_enc != 0, dpu_kms != 0); + if (!dpu_enc) { + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); return -EINVAL; } @@ -2133,14 +2110,12 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t) struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, frame_done_timer); struct drm_encoder *drm_enc = &dpu_enc->base; - struct msm_drm_private *priv; u32 event; - if (!drm_enc->dev || !drm_enc->dev->dev_private) { + if (!drm_enc->dev) { DPU_ERROR("invalid parameters\n"); return; } - priv = drm_enc->dev->dev_private; if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 2923b63d95fe..047960949fbb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -124,13 +124,11 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx) static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx) { struct dpu_encoder_phys *phys_enc = arg; - struct dpu_encoder_phys_cmd *cmd_enc; if (!phys_enc || !phys_enc->hw_ctl) return; DPU_ATRACE_BEGIN("ctl_start_irq"); - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0); @@ -316,13 +314,9 @@ end: static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, bool enable) { - struct dpu_encoder_phys_cmd *cmd_enc; - if (!phys_enc) return; - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, enable, atomic_read(&phys_enc->vblank_refcount)); @@ -355,7 +349,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config( struct drm_display_mode *mode; bool tc_enable = true; u32 vsync_hz; - struct msm_drm_private *priv; struct dpu_kms *dpu_kms; if (!phys_enc || !phys_enc->hw_pp) { @@ -373,11 +366,6 @@ static void dpu_encoder_phys_cmd_tearcheck_config( } dpu_kms = phys_enc->dpu_kms; - if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) { - DPU_ERROR("invalid device\n"); - return; - } - priv = dpu_kms->dev->dev_private; /* * TE default: dsi byte clock calculated base on 70 fps; @@ -650,13 +638,10 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete( struct dpu_encoder_phys *phys_enc) { int rc; - struct dpu_encoder_phys_cmd *cmd_enc; if (!phys_enc) return -EINVAL; - cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); - rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); if (rc) { DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n", diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index b9c84fb4d4a1..3123ef873cdf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -374,7 +374,7 @@ static void dpu_encoder_phys_vid_mode_set( struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - if (!phys_enc || !phys_enc->dpu_kms) { + if (!phys_enc) { DPU_ERROR("invalid encoder/kms\n"); return; } @@ -566,16 +566,13 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff( static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) { - struct msm_drm_private *priv; unsigned long lock_flags; int ret; - if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev || - !phys_enc->parent->dev->dev_private) { + if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) { DPU_ERROR("invalid encoder/device\n"); return; } - priv = phys_enc->parent->dev->dev_private; if (!phys_enc->hw_intf || !phys_enc->hw_ctl) { DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 58b0485dc375..6c92f0fbeac9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -30,10 +30,6 @@ #define CREATE_TRACE_POINTS #include "dpu_trace.h" -static const char * const iommu_ports[] = { - "mdp_0", -}; - /* * To enable overall DRM driver logging * # echo 0x2 > /sys/module/drm/parameters/debug @@ -68,16 +64,14 @@ static int _dpu_danger_signal_status(struct seq_file *s, bool danger_status) { struct dpu_kms *kms = (struct dpu_kms *)s->private; - struct msm_drm_private *priv; struct dpu_danger_safe_status status; int i; - if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) { + if (!kms->hw_mdp) { DPU_ERROR("invalid arg(s)\n"); return 0; } - priv = kms->dev->dev_private; memset(&status, 0, sizeof(struct dpu_danger_safe_status)); pm_runtime_get_sync(&kms->pdev->dev); @@ -153,13 +147,7 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) return 0; dev = dpu_kms->dev; - if (!dev) - return 0; - priv = dev->dev_private; - if (!priv) - return 0; - base = dpu_kms->mmio + regset->offset; /* insert padding spaces, if needed */ @@ -280,7 +268,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct dpu_kms *dpu_kms; - struct msm_drm_private *priv; struct drm_device *dev; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -292,10 +279,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; - if (!dev || !dev->dev_private) - return; - priv = dev->dev_private; - /* Call prepare_commit for all affected encoders */ for_each_new_crtc_in_state(state, crtc, crtc_state, i) { drm_for_each_encoder_mask(encoder, crtc->dev, @@ -333,7 +316,6 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder) if (funcs && funcs->commit) funcs->commit(encoder); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); drm_for_each_crtc(crtc, dev) { if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder))) continue; @@ -464,16 +446,6 @@ static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) struct msm_drm_private *priv; int i; - if (!dpu_kms) { - DPU_ERROR("invalid dpu_kms\n"); - return; - } else if (!dpu_kms->dev) { - DPU_ERROR("invalid dev\n"); - return; - } else if (!dpu_kms->dev->dev_private) { - DPU_ERROR("invalid dev_private\n"); - return; - } priv = 
dpu_kms->dev->dev_private; for (i = 0; i < priv->num_crtcs; i++) @@ -505,7 +477,6 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; int max_crtc_count; - dev = dpu_kms->dev; priv = dev->dev_private; catalog = dpu_kms->catalog; @@ -585,8 +556,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) int i; dev = dpu_kms->dev; - if (!dev) - return; if (dpu_kms->hw_intr) dpu_hw_intr_destroy(dpu_kms->hw_intr); @@ -725,8 +694,7 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) mmu = dpu_kms->base.aspace->mmu; - mmu->funcs->detach(mmu, (const char **)iommu_ports, - ARRAY_SIZE(iommu_ports)); + mmu->funcs->detach(mmu); msm_gem_address_space_put(dpu_kms->base.aspace); dpu_kms->base.aspace = NULL; @@ -752,8 +720,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return PTR_ERR(aspace); } - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) { DPU_ERROR("failed to attach iommu %d\n", ret); msm_gem_address_space_put(aspace); @@ -803,16 +770,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; - if (!dev) { - DPU_ERROR("invalid device\n"); - return rc; - } - priv = dev->dev_private; - if (!priv) { - DPU_ERROR("invalid private data\n"); - return rc; - } atomic_set(&dpu_kms->bandwidth_ref, 0); @@ -974,7 +932,7 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev) struct dpu_kms *dpu_kms; int irq; - if (!dev || !dev->dev_private) { + if (!dev) { DPU_ERROR("drm device node invalid\n"); return ERR_PTR(-EINVAL); } @@ -1064,11 +1022,6 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) struct dss_module_power *mp = &dpu_kms->mp; ddev = dpu_kms->dev; - if (!ddev) { - DPU_ERROR("invalid drm_device\n"); - return rc; - } - rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); @@ -1086,11 +1039,6 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev) struct dss_module_power *mp = &dpu_kms->mp; ddev = dpu_kms->dev; - if (!ddev) { - DPU_ERROR("invalid drm_device\n"); - return rc; - } - rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { DPU_ERROR("clock enable failed rc:%d\n", rc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 959d03e007fa..c6169e7df19d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -139,10 +139,6 @@ struct vsync_info { #define to_dpu_kms(x) container_of(x, struct dpu_kms, base) -/* get struct msm_kms * from drm_device * */ -#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? 
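
The many dpu NULL-check removals in these hunks share one justification: the pointers are produced by container_of() on values the caller has already dereferenced, so they cannot be NULL by the time the check runs. A sketch of that reasoning, restating the to_dpu_kms() conversion that dpu_kms.h defines (the wrapper name here is hypothetical):

	/*
	 * container_of() is pure pointer arithmetic on 'kms'; if 'kms' was
	 * valid enough to be called through at all, the converted pointer is
	 * non-NULL by construction, so a later '!dpu_kms' test is dead code.
	 */
	static inline struct dpu_kms *example_to_dpu_kms(struct msm_kms *kms)
	{
		return container_of(kms, struct dpu_kms, base);
	}
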
\ - ((struct msm_drm_private *)((D)->dev_private))->kms : NULL) - /** * Debugfs functions - extra helper functions for debugfs support * diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 8d24b79fd400..991f4c8f8a12 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -154,10 +154,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, u32 ot_lim; int ret, i; - if (!dpu_kms) { - DPU_ERROR("invalid arguments\n"); - return; - } mdp = dpu_kms->hw_mdp; for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { @@ -214,7 +210,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, const struct dpu_vbif_qos_tbl *qos_tbl; int i; - if (!dpu_kms || !params || !dpu_kms->hw_mdp) { + if (!params || !dpu_kms->hw_mdp) { DPU_ERROR("invalid arguments\n"); return; } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 50711ccc8691..dda05436f716 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -157,10 +157,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, } } -static const char * const iommu_ports[] = { - "mdp_port0_cb0", "mdp_port1_cb0", -}; - static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); @@ -172,8 +168,7 @@ static void mdp4_destroy(struct msm_kms *kms) drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); + aspace->mmu->funcs->detach(aspace->mmu); msm_gem_address_space_put(aspace); } @@ -524,8 +519,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) kms->aspace = aspace; - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) goto fail; } else { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index f6e71ff539ca..1f48f64539a2 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -14,7 +14,7 @@ struct mdp5_cfg_handler { /* mdp5_cfg must be exposed (used in mdp5.xml.h) */ const struct mdp5_cfg_hw *mdp5_cfg = NULL; -const struct mdp5_cfg_hw msm8x74v1_config = { +static const struct mdp5_cfg_hw msm8x74v1_config = { .name = "msm8x74v1", .mdp = { .count = 1, @@ -98,7 +98,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = { .max_clk = 200000000, }; -const struct mdp5_cfg_hw msm8x74v2_config = { +static const struct mdp5_cfg_hw msm8x74v2_config = { .name = "msm8x74", .mdp = { .count = 1, @@ -180,7 +180,7 @@ const struct mdp5_cfg_hw msm8x74v2_config = { .max_clk = 200000000, }; -const struct mdp5_cfg_hw apq8084_config = { +static const struct mdp5_cfg_hw apq8084_config = { .name = "apq8084", .mdp = { .count = 1, @@ -275,7 +275,7 @@ const struct mdp5_cfg_hw apq8084_config = { .max_clk = 320000000, }; -const struct mdp5_cfg_hw msm8x16_config = { +static const struct mdp5_cfg_hw msm8x16_config = { .name = "msm8x16", .mdp = { .count = 1, @@ -342,7 +342,7 @@ const struct mdp5_cfg_hw msm8x16_config = { .max_clk = 320000000, }; -const struct mdp5_cfg_hw msm8x94_config = { +static const struct mdp5_cfg_hw msm8x94_config = { .name = "msm8x94", .mdp = { .count = 1, @@ -437,7 +437,7 @@ const struct mdp5_cfg_hw msm8x94_config = { .max_clk = 400000000, }; -const struct mdp5_cfg_hw msm8x96_config = { +static const struct mdp5_cfg_hw msm8x96_config = { 
.name = "msm8x96", .mdp = { .count = 1, @@ -545,7 +545,104 @@ const struct mdp5_cfg_hw msm8x96_config = { .max_clk = 412500000, }; -const struct mdp5_cfg_hw msm8917_config = { +const struct mdp5_cfg_hw msm8x76_config = { + .name = "msm8x76", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_DSC | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .smp = { + .mmb_count = 10, + .mmb_size = 10240, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 9, + [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .pipe_vig = { + .count = 2, + .base = { 0x04000, 0x06000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x440DC }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + .lm = { + .count = 2, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 3, + .base = { 0x70000, 0x70800, 0x72000 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .max_clk = 360000000, +}; + +static const struct mdp5_cfg_hw msm8917_config = { .name = "msm8917", .mdp = { .count = 1, @@ -630,7 +727,7 @@ const struct mdp5_cfg_hw msm8917_config = { .max_clk = 320000000, }; -const struct mdp5_cfg_hw msm8998_config = { +static const struct mdp5_cfg_hw msm8998_config = { .name = "msm8998", .mdp = { .count = 1, @@ -745,6 +842,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = { { .revision = 6, .config = { .hw = &msm8x16_config } }, { .revision = 9, .config = { .hw = &msm8x94_config } }, { .revision = 7, .config = { .hw = &msm8x96_config } }, + { .revision = 11, .config = { .hw = &msm8x76_config } }, { .revision = 15, .config = { .hw = &msm8917_config } }, }; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index eb0b4b7dc7cc..05cc04f729d6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -214,7 +214,6 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; - const struct mdp5_cfg_hw *hw_cfg; struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; const struct mdp_format *format; struct mdp5_hw_mixer *mixer = pipeline->mixer; @@ -232,8 +231,6 @@ static void blend_setup(struct drm_crtc *crtc) u32 val; #define blender(stage) ((stage) - STAGE0) - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); /* ctl could be released already when we are shutting down: */ diff --git 
a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 91cd76a2bab1..e43ecd4be10a 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -19,10 +19,6 @@ #include "msm_mmu.h" #include "mdp5_kms.h" -static const char *iommu_ports[] = { - "mdp_0", -}; - static int mdp5_hw_init(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); @@ -233,8 +229,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms) mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); if (aspace) { - aspace->mmu->funcs->detach(aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); + aspace->mmu->funcs->detach(aspace->mmu); msm_gem_address_space_put(aspace); } } @@ -314,6 +309,10 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) mdp5_kms->enable_count--; WARN_ON(mdp5_kms->enable_count < 0); + if (mdp5_kms->tbu_rt_clk) + clk_disable_unprepare(mdp5_kms->tbu_rt_clk); + if (mdp5_kms->tbu_clk) + clk_disable_unprepare(mdp5_kms->tbu_clk); clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); @@ -334,6 +333,10 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) clk_prepare_enable(mdp5_kms->core_clk); if (mdp5_kms->lut_clk) clk_prepare_enable(mdp5_kms->lut_clk); + if (mdp5_kms->tbu_clk) + clk_prepare_enable(mdp5_kms->tbu_clk); + if (mdp5_kms->tbu_rt_clk) + clk_prepare_enable(mdp5_kms->tbu_rt_clk); return 0; } @@ -466,14 +469,11 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; - const struct mdp5_cfg_hw *hw_cfg; unsigned int num_crtcs; int i, ret, pi = 0, ci = 0; struct drm_plane *primary[MAX_BASES] = { NULL }; struct drm_plane *cursor[MAX_BASES] = { NULL }; - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* * Construct encoders and modeset initialize connector devices * for each external display interface. @@ -737,8 +737,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) kms->aspace = aspace; - ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) { DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n", ret); @@ -974,6 +973,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) /* optional clocks: */ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); + get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false); + get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false); /* we need to set a default rate before enabling. 
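
A condensed sketch of the optional-clock handling the mdp5 hunks above add for the new TBU clocks, assuming (as in the diff) that get_clk(..., false) leaves the handle NULL when the clock is absent on a given SoC. The function names are hypothetical; the guard-then-enable shape and the reversed disable order mirror mdp5_enable()/mdp5_disable():

	static void example_mdp5_clk_on(struct mdp5_kms *mdp5_kms)
	{
		clk_prepare_enable(mdp5_kms->core_clk);
		/* optional clocks: only touch handles that were found */
		if (mdp5_kms->tbu_clk)
			clk_prepare_enable(mdp5_kms->tbu_clk);
		if (mdp5_kms->tbu_rt_clk)
			clk_prepare_enable(mdp5_kms->tbu_rt_clk);
	}

	static void example_mdp5_clk_off(struct mdp5_kms *mdp5_kms)
	{
		/* disable in the reverse order of enabling */
		if (mdp5_kms->tbu_rt_clk)
			clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
		if (mdp5_kms->tbu_clk)
			clk_disable_unprepare(mdp5_kms->tbu_clk);
		clk_disable_unprepare(mdp5_kms->core_clk);
	}
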
Set a safe * rate first, then figure out hw revision, and then set a diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h index d1bf4fdfc815..128866742593 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -53,6 +53,8 @@ struct mdp5_kms { struct clk *ahb_clk; struct clk *core_clk; struct clk *lut_clk; + struct clk *tbu_clk; + struct clk *tbu_rt_clk; struct clk *vsync_clk; /* diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index b31cfb554fa2..d7fa2c49e741 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -121,7 +121,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, struct mdp5_kms *mdp5_kms = get_kms(smp); int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); int i, hsub, nplanes, nlines; - u32 fmt = format->base.pixel_format; uint32_t blkcfg = 0; nplanes = info->num_planes; @@ -135,7 +134,6 @@ uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, * them together, writes to SMP using a single client. */ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { - fmt = DRM_FORMAT_NV24; nplanes = 2; /* if decimation is enabled, HW decimates less on the diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index b7b7c1a9164a..86ad3fdf207d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -66,6 +66,26 @@ static const struct msm_dsi_config msm8916_dsi_cfg = { .num_dsi = 1, }; +static const char * const dsi_8976_bus_clk_names[] = { + "mdp_core", "iface", "bus", +}; + +static const struct msm_dsi_config msm8976_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 3, + .regs = { + {"gdsc", -1, -1}, + {"vdda", 100000, 100}, /* 1.2 V */ + {"vddio", 100000, 100}, /* 1.8 V */ + }, + }, + .bus_clk_names = dsi_8976_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names), + .io_start = { 0x1a94000, 0x1a96000 }, + .num_dsi = 2, +}; + static const struct msm_dsi_config msm8994_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { @@ -147,7 +167,7 @@ static const struct msm_dsi_config sdm845_dsi_cfg = { .num_dsi = 2, }; -const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { +static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { .link_clk_enable = dsi_link_clk_enable_v2, .link_clk_disable = dsi_link_clk_disable_v2, .clk_init_ver = dsi_clk_init_v2, @@ -158,7 +178,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { .calc_clk_rate = dsi_calc_clk_rate_v2, }; -const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { .link_clk_enable = dsi_link_clk_enable_6g, .link_clk_disable = dsi_link_clk_disable_6g, .clk_init_ver = NULL, @@ -169,7 +189,7 @@ const static struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { .calc_clk_rate = dsi_calc_clk_rate_6g, }; -const static struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { .link_clk_enable = dsi_link_clk_enable_6g, .link_clk_disable = dsi_link_clk_disable_6g, .clk_init_ver = dsi_clk_init_6g_v2, @@ -197,6 +217,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8916_dsi_cfg, &msm_dsi_6g_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2, + &msm8976_dsi_cfg, &msm_dsi_6g_host_ops}, 
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index e2b7a7dfbe49..50a37ceb6a25 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -17,6 +17,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 +#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 1e7b1be25bb0..458cec82ae13 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1293,14 +1293,13 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host, u8 *buf, int rx_byte, int pkt_size) { - u32 *lp, *temp, data; + u32 *temp, data; int i, j = 0, cnt; u32 read_cnt; u8 reg[16]; int repeated_bytes = 0; int buf_offset = buf - msm_host->rx_buf; - lp = (u32 *)buf; temp = (u32 *)reg; cnt = (rx_byte + 3) >> 2; if (cnt > 4) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 3522863a4984..b0cfa67d2a57 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -145,7 +145,7 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, { const unsigned long bit_rate = clk_req->bitclk_rate; const unsigned long esc_rate = clk_req->escclk_rate; - s32 ui, ui_x8, lpx; + s32 ui, ui_x8; s32 tmax, tmin; s32 pcnt0 = 50; s32 pcnt1 = 50; @@ -175,7 +175,6 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); ui_x8 = ui << 3; - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); tmin = max_t(s32, temp, 0); @@ -262,7 +261,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, { const unsigned long bit_rate = clk_req->bitclk_rate; const unsigned long esc_rate = clk_req->escclk_rate; - s32 ui, ui_x8, lpx; + s32 ui, ui_x8; s32 tmax, tmin; s32 pcnt0 = 50; s32 pcnt1 = 50; @@ -284,7 +283,6 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); ui_x8 = ui << 3; - lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); tmin = max_t(s32, temp, 0); @@ -485,6 +483,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { #ifdef CONFIG_DRM_MSM_DSI_28NM_PHY { .compatible = "qcom,dsi-phy-28nm-hpm", .data = &dsi_phy_28nm_hpm_cfgs }, + { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b", + .data = &dsi_phy_28nm_hpm_famb_cfgs }, { .compatible = "qcom,dsi-phy-28nm-lp", .data = &dsi_phy_28nm_lp_cfgs }, #endif diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index c4069ce6afe6..24b294ed3059 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -40,6 +40,7 @@ struct msm_dsi_phy_cfg { }; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg 
dsi_phy_28nm_8960_cfgs; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index b3f678f6c2aa..c3c580cfd8b1 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -39,15 +39,10 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); } -static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy) { void __iomem *base = phy->reg_base; - if (!enable) { - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); - return; - } - dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0); @@ -56,6 +51,39 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7); dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); +} + +static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->reg_base; + + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1); + dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20); + + if (phy->cfg->type == MSM_DSI_PHY_28NM_LP) + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05); + else + dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d); +} + +static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) +{ + if (!enable) { + dsi_phy_write(phy->reg_base + + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0); + return; + } + + if (phy->regulator_ldo_mode) + dsi_28nm_phy_regulator_enable_ldo(phy); + else + dsi_28nm_phy_regulator_enable_dcdc(phy); } static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, @@ -77,8 +105,6 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, dsi_28nm_phy_regulator_ctrl(phy, true); - dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00); - dsi_28nm_dphy_set_timing(phy, timing); dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00); @@ -142,6 +168,24 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .num_dsi_phy = 2, }; +const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = { + .type = MSM_DSI_PHY_28NM_HPM, + .src_pll_truthtable = { {true, true}, {false, true} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vddio", 100000, 100}, + }, + }, + .ops = { + .enable = dsi_28nm_phy_enable, + .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, + }, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, +}; + const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .type = MSM_DSI_PHY_28NM_LP, .src_pll_truthtable = { {true, true}, {true, true} }, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 1697e61f9c2f..8a38d4b95102 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -29,8 +29,12 @@ static int 
msm_hdmi_phy_resource_init(struct hdmi_phy *phy) reg = devm_regulator_get(dev, cfg->reg_names[i]); if (IS_ERR(reg)) { ret = PTR_ERR(reg); - DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n", - cfg->reg_names[i], ret); + if (ret != -EPROBE_DEFER) { + DRM_DEV_ERROR(dev, + "failed to get phy regulator: %s (%d)\n", + cfg->reg_names[i], ret); + } + return ret; } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 6be879578140..1c74381a4fc9 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file) struct msm_gpu_show_priv *show_priv = m->private; struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_gpu *gpu = priv->gpu; - int ret; - - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex); - if (ret) - return ret; + mutex_lock(&show_priv->dev->struct_mutex); gpu->funcs->gpu_state_put(show_priv->state); mutex_unlock(&show_priv->dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index a052364a5d74..18f3a5c53ffb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -16,6 +16,7 @@ #include <linux/pm_opp.h> #include <linux/devfreq.h> #include <linux/devcoredump.h> +#include <linux/sched/task.h> /* * Power Management: @@ -838,7 +839,7 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, return ERR_CAST(aspace); } - ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); + ret = aspace->mmu->funcs->attach(aspace->mmu); if (ret) { msm_gem_address_space_put(aspace); return ERR_PTR(ret); @@ -995,8 +996,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false); if (!IS_ERR_OR_NULL(gpu->aspace)) { - gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, - NULL, 0); + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); msm_gem_address_space_put(gpu->aspace); } } diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c index 34f643a0c28a..34980d8eb7ad 100644 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ b/drivers/gpu/drm/msm/msm_gpummu.c @@ -21,14 +21,12 @@ struct msm_gpummu { #define GPUMMU_PAGE_SIZE SZ_4K #define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE) -static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static int msm_gpummu_attach(struct msm_mmu *mmu) { return 0; } -static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static void msm_gpummu_detach(struct msm_mmu *mmu) { } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 8c95c31e2b12..ad58cfe5998e 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -23,16 +23,14 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, return 0; } -static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static int msm_iommu_attach(struct msm_mmu *mmu) { struct msm_iommu *iommu = to_msm_iommu(mmu); return iommu_attach_device(iommu->domain, mmu->dev); } -static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, - int cnt) +static void msm_iommu_detach(struct msm_mmu *mmu) { struct msm_iommu *iommu = to_msm_iommu(mmu); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 871d56303697..67a623f14319 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ 
b/drivers/gpu/drm/msm/msm_mmu.h @@ -10,8 +10,8 @@ #include <linux/iommu.h> struct msm_mmu_funcs { - int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); - void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); + int (*attach)(struct msm_mmu *mmu); + void (*detach)(struct msm_mmu *mmu); int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot); int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len); diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index c7832a951039..af7ceb246c7c 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -298,7 +298,7 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) static void snapshot_buf(struct msm_rd_state *rd, struct msm_gem_submit *submit, int idx, - uint64_t iova, uint32_t size) + uint64_t iova, uint32_t size, bool full) { struct msm_gem_object *obj = submit->bos[idx].obj; unsigned offset = 0; @@ -318,6 +318,9 @@ static void snapshot_buf(struct msm_rd_state *rd, rd_write_section(rd, RD_GPUADDR, (uint32_t[3]){ iova, size, iova >> 32 }, 12); + if (!full) + return; + /* But only dump the contents of buffers marked READ */ if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) return; @@ -381,18 +384,21 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); for (i = 0; i < submit->nr_bos; i++) - if (should_dump(submit, i)) - snapshot_buf(rd, submit, i, 0, 0); + snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i)); for (i = 0; i < submit->nr_cmds; i++) { - uint64_t iova = submit->cmd[i].iova; uint32_t szd = submit->cmd[i].size; /* in dwords */ /* snapshot cmdstream bo's (if we haven't already): */ if (!should_dump(submit, i)) { snapshot_buf(rd, submit, submit->cmd[i].idx, - submit->cmd[i].iova, szd * 4); + submit->cmd[i].iova, szd * 4, true); } + } + + for (i = 0; i < submit->nr_cmds; i++) { + uint64_t iova = submit->cmd[i].iova; + uint32_t szd = submit->cmd[i].size; /* in dwords */ switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h index 43df86c38f58..24f7700768da 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/atom.h +++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h @@ -114,6 +114,7 @@ struct nv50_head_atom { u8 nhsync:1; u8 nvsync:1; u8 depth:4; + u8 bpc; } or; /* Currently only used for MST */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index a13924ae1992..63425e246018 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder, * same size as the native one (e.g. 
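
Looking back at the msm_rd changes above: snapshot_buf() now always emits the RD_GPUADDR metadata but dumps buffer contents only when the new 'full' argument is set, so the submit path can record an address entry for every bo while restricting payload dumps to the buffers that qualify. A reduced sketch of that gating, with the rd_write_section() calls stubbed out as hypothetical helpers:

	/* hypothetical stand-ins for the rd_write_section() calls in the diff */
	static void example_write_gpuaddr(struct msm_rd_state *rd,
					  uint64_t iova, uint32_t size);
	static void example_write_contents(struct msm_rd_state *rd,
					   struct msm_gem_submit *submit, int idx);

	static void example_snapshot_buf(struct msm_rd_state *rd,
					 struct msm_gem_submit *submit, int idx,
					 uint64_t iova, uint32_t size, bool full)
	{
		/* always record where the buffer lives ... */
		example_write_gpuaddr(rd, iova, size);

		if (!full)
			return;		/* ... but contents only on request */

		/* even a full snapshot skips buffers not marked READ */
		if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
			return;

		example_write_contents(rd, submit, idx);
	}
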
different * refresh rate) */ - if (adjusted_mode->hdisplay == native_mode->hdisplay && - adjusted_mode->vdisplay == native_mode->vdisplay && - adjusted_mode->type & DRM_MODE_TYPE_DRIVER) + if (mode->hdisplay == native_mode->hdisplay && + mode->vdisplay == native_mode->vdisplay && + mode->type & DRM_MODE_TYPE_DRIVER) break; mode = native_mode; asyc->scaler.full = true; @@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct nouveau_connector *nv_connector = - nouveau_connector(conn_state->connector); - return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, - nv_connector->native_mode); + struct drm_connector *connector = conn_state->connector; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); + int ret; + + ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, + nv_connector->native_mode); + if (ret) + return ret; + + if (crtc_state->mode_changed || crtc_state->connectors_changed) + asyh->or.bpc = connector->display_info.bpc; + + return 0; } /****************************************************************************** @@ -770,32 +780,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, struct nv50_mstm *mstm = mstc->mstm; struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); int slots; + int ret; + + ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, + mstc->native); + if (ret) + return ret; + + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) + return 0; + + /* + * When restoring duplicated states, we need to make sure that the bw + * remains the same and avoid recalculating it, as the connector's bpc + * may have changed after the state was duplicated + */ + if (!state->duplicated) { + const int clock = crtc_state->adjusted_mode.clock; - if (crtc_state->mode_changed || crtc_state->connectors_changed) { /* - * When restoring duplicated states, we need to make sure that - * the bw remains the same and avoid recalculating it, as the - * connector's bpc may have changed after the state was - * duplicated + * XXX: Since we don't use HDR in userspace quite yet, limit + * the bpc to 8 to save bandwidth on the topology. 
In the + * future, we'll want to properly fix this by dynamically + * selecting the highest possible bpc that would fit in the + * topology */ - if (!state->duplicated) { - const int bpp = connector->display_info.bpc * 3; - const int clock = crtc_state->adjusted_mode.clock; + asyh->or.bpc = min(connector->display_info.bpc, 8U); + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3); + } - asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp); - } + slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, + asyh->dp.pbn); + if (slots < 0) + return slots; - slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, - mstc->port, - asyh->dp.pbn); - if (slots < 0) - return slots; + asyh->dp.tu = slots; - asyh->dp.tu = slots; - } + return 0; +} - return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state, - mstc->native); +static u8 +nv50_dp_bpc_to_depth(unsigned int bpc) +{ + switch (bpc) { + case 6: return 0x2; + case 8: return 0x5; + case 10: /* fall-through */ + default: return 0x6; + } } static void @@ -808,7 +840,7 @@ nv50_msto_enable(struct drm_encoder *encoder) struct nv50_mstm *mstm = NULL; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - u8 proto, depth; + u8 proto; bool r; drm_connector_list_iter_begin(encoder->dev, &conn_iter); @@ -837,14 +869,8 @@ nv50_msto_enable(struct drm_encoder *encoder) else proto = 0x9; - switch (mstc->connector.display_info.bpc) { - case 6: depth = 0x2; break; - case 8: depth = 0x5; break; - case 10: - default: depth = 0x6; break; - } - - mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth); + mstm->outp->update(mstm->outp, head->base.index, armh, proto, + nv50_dp_bpc_to_depth(armh->or.bpc)); msto->head = head; msto->mstc = mstc; @@ -986,20 +1012,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector, return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); } -static const struct drm_connector_helper_funcs -nv50_mstc_help = { - .get_modes = nv50_mstc_get_modes, - .mode_valid = nv50_mstc_mode_valid, - .best_encoder = nv50_mstc_best_encoder, - .atomic_best_encoder = nv50_mstc_atomic_best_encoder, - .atomic_check = nv50_mstc_atomic_check, -}; - -static enum drm_connector_status -nv50_mstc_detect(struct drm_connector *connector, bool force) +static int +nv50_mstc_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) { struct nv50_mstc *mstc = nv50_mstc(connector); - enum drm_connector_status conn_status; int ret; if (drm_connector_is_unregistered(connector)) @@ -1009,14 +1026,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) if (ret < 0 && ret != -EACCES) return connector_status_disconnected; - conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, - mstc->port); + ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr, + mstc->port); pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); - return conn_status; + return ret; } +static const struct drm_connector_helper_funcs +nv50_mstc_help = { + .get_modes = nv50_mstc_get_modes, + .mode_valid = nv50_mstc_mode_valid, + .best_encoder = nv50_mstc_best_encoder, + .atomic_best_encoder = nv50_mstc_atomic_best_encoder, + .atomic_check = nv50_mstc_atomic_check, + .detect_ctx = nv50_mstc_detect, +}; + static void nv50_mstc_destroy(struct drm_connector *connector) { @@ -1031,7 +1058,6 @@ nv50_mstc_destroy(struct drm_connector *connector) static const struct drm_connector_funcs nv50_mstc = { .reset = nouveau_conn_reset, - .detect = 
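
To make the bandwidth accounting above concrete: with the 8 bpc clamp, a 10 bpc panel driving a hypothetical 1080p60 mode (pixel clock 148500 kHz) is budgeted at 24 bits per pixel rather than 30, and the same clamped bpc later selects the OR depth via the new nv50_dp_bpc_to_depth() helper. A sketch using only calls visible in this hunk (the mode values are illustrative):

	static u8 example_msto_bw(struct drm_connector *connector,
				  struct nv50_head_atom *asyh)
	{
		const int clock = 148500;	/* kHz, hypothetical 1080p60 */

		/* clamp to 8 bpc until userspace actually drives HDR */
		asyh->or.bpc = min(connector->display_info.bpc, 8U);

		/* PBN from pixel clock and bits per pixel (bpc * 3 = 24) */
		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);

		/* 8 bpc maps to hardware depth 0x5 */
		return nv50_dp_bpc_to_depth(asyh->or.bpc);
	}
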
nv50_mstc_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = nv50_mstc_destroy, .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state, @@ -1309,14 +1335,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm) } static void -nv50_mstm_init(struct nv50_mstm *mstm) +nv50_mstm_init(struct nv50_mstm *mstm, bool runtime) { int ret; if (!mstm || !mstm->mgr.mst_state) return; - ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime); if (ret == -1) { drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); drm_kms_helper_hotplug_event(mstm->mgr.dev); @@ -1498,20 +1524,14 @@ nv50_sor_enable(struct drm_encoder *encoder) lvds.lvds.script |= 0x0200; } - if (nv_connector->base.display_info.bpc == 8) + if (asyh->or.bpc == 8) lvds.lvds.script |= 0x0200; } nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds)); break; case DCB_OUTPUT_DP: - if (nv_connector->base.display_info.bpc == 6) - depth = 0x2; - else - if (nv_connector->base.display_info.bpc == 8) - depth = 0x5; - else - depth = 0x6; + depth = nv50_dp_bpc_to_depth(asyh->or.bpc); if (nv_encoder->link & 1) proto = 0x8; @@ -1662,7 +1682,7 @@ nv50_pior_enable(struct drm_encoder *encoder) nv50_outp_acquire(nv_encoder); nv_connector = nouveau_encoder_connector_get(nv_encoder); - switch (nv_connector->base.display_info.bpc) { + switch (asyh->or.bpc) { case 10: asyh->or.depth = 0x6; break; case 8: asyh->or.depth = 0x5; break; case 6: asyh->or.depth = 0x2; break; @@ -2263,7 +2283,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - nv50_mstm_init(nv_encoder->dp.mstm); + nv50_mstm_init(nv_encoder->dp.mstm, runtime); } } diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 71c23bf1fe25..c9692df2b76c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh, struct nv50_head_atom *asyh, struct nouveau_conn_atom *asyc) { - struct drm_connector *connector = asyc->state.connector; u32 mode = 0x00; if (asyc->dither.mode == DITHERING_MODE_AUTO) { - if (asyh->base.depth > connector->display_info.bpc * 3) + if (asyh->base.depth > asyh->or.bpc * 3) mode = DITHERING_MODE_DYNAMIC2X2; } else { mode = asyc->dither.mode; } if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { - if (connector->display_info.bpc >= 8) + if (asyh->or.bpc >= 8) mode |= DITHERING_DEPTH_8BPC; } else { mode |= asyc->dither.depth; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 3a5db17bc5c7..9a9a7f5003d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector) void nouveau_conn_reset(struct drm_connector *connector) { + struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_conn_atom *asyc; - if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) - return; + if (drm_drv_uses_atomic_modeset(connector->dev)) { + if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL)))) + return; + + if (connector->state) + nouveau_conn_atomic_destroy_state(connector, + connector->state); + + __drm_atomic_helper_connector_reset(connector, &asyc->state); + } else { + asyc = &nv_connector->properties_state; + } - if 
(connector->state) - nouveau_conn_atomic_destroy_state(connector, connector->state); - __drm_atomic_helper_connector_reset(connector, &asyc->state); asyc->dither.mode = DITHERING_MODE_AUTO; asyc->dither.depth = DITHERING_DEPTH_AUTO; asyc->scaler.mode = DRM_MODE_SCALE_NONE; @@ -276,8 +284,14 @@ void nouveau_conn_attach_properties(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state); struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_conn_atom *armc; + + if (drm_drv_uses_atomic_modeset(connector->dev)) + armc = nouveau_conn_atom(connector->state); + else + armc = &nv_connector->properties_state; /* Init DVI-I specific properties. */ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) @@ -748,9 +762,9 @@ static int nouveau_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state); struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct nouveau_conn_atom *asyc = &nv_connector->properties_state; struct drm_encoder *encoder = to_drm_encoder(nv_encoder); int ret; @@ -1130,6 +1144,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const char *name = connector->name; struct nouveau_encoder *nv_encoder; int ret; + bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); + + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { + NV_DEBUG(drm, "service %s\n", name); + drm_dp_cec_irq(&nv_connector->aux); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) + nv50_mstm_service(nv_encoder->dp.mstm); + + return NVIF_NOTIFY_KEEP; + } ret = pm_runtime_get(drm->dev->dev); if (ret == 0) { @@ -1150,25 +1174,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) return NVIF_NOTIFY_DROP; } - if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { - NV_DEBUG(drm, "service %s\n", name); - drm_dp_cec_irq(&nv_connector->aux); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) - nv50_mstm_service(nv_encoder->dp.mstm); - } else { - bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); - + if (!plugged) + drm_dp_cec_unset_edid(&nv_connector->aux); + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { if (!plugged) - drm_dp_cec_unset_edid(&nv_connector->aux); - NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", name); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { - if (!plugged) - nv50_mstm_remove(nv_encoder->dp.mstm); - } - - drm_helper_hpd_irq_event(connector->dev); + nv50_mstm_remove(nv_encoder->dp.mstm); } + drm_helper_hpd_irq_event(connector->dev); + pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index f43a8d63aef8..de84fb4708c7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -29,6 +29,7 @@ #include <nvif/notify.h> +#include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> @@ -44,6 +45,60 @@ struct dcb_output; struct nouveau_backlight; #endif +#define nouveau_conn_atom(p) \ + container_of((p), struct nouveau_conn_atom, state) + +struct nouveau_conn_atom { + struct drm_connector_state state; + + struct { + /* The enum values specifically defined here match nv50/gf119 + * hw values, and the code relies on this. + */ + enum { + DITHERING_MODE_OFF = 0x00, + DITHERING_MODE_ON = 0x01, + DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, + DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, + DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, + DITHERING_MODE_AUTO + } mode; + enum { + DITHERING_DEPTH_6BPC = 0x00, + DITHERING_DEPTH_8BPC = 0x02, + DITHERING_DEPTH_AUTO + } depth; + } dither; + + struct { + int mode; /* DRM_MODE_SCALE_* */ + struct { + enum { + UNDERSCAN_OFF, + UNDERSCAN_ON, + UNDERSCAN_AUTO, + } mode; + u32 hborder; + u32 vborder; + } underscan; + bool full; + } scaler; + + struct { + int color_vibrance; + int vibrant_hue; + } procamp; + + union { + struct { + bool dither:1; + bool scaler:1; + bool procamp:1; + }; + u8 mask; + } set; +}; + struct nouveau_connector { struct drm_connector base; enum dcb_connector_type type; @@ -63,6 +118,12 @@ struct nouveau_connector { #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT struct nouveau_backlight *backlight; #endif + /* + * Our connector property code expects a nouveau_conn_atom struct + * even on pre-nv50 where we do not support atomic. This embedded + * version gets used in the non atomic modeset case. + */ + struct nouveau_conn_atom properties_state; }; static inline struct nouveau_connector *nouveau_connector( @@ -121,61 +182,6 @@ extern int nouveau_ignorelid; extern int nouveau_duallink; extern int nouveau_hdmimhz; -#include <drm/drm_crtc.h> -#define nouveau_conn_atom(p) \ - container_of((p), struct nouveau_conn_atom, state) - -struct nouveau_conn_atom { - struct drm_connector_state state; - - struct { - /* The enum values specifically defined here match nv50/gf119 - * hw values, and the code relies on this. 
- */ - enum { - DITHERING_MODE_OFF = 0x00, - DITHERING_MODE_ON = 0x01, - DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, - DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, - DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, - DITHERING_MODE_AUTO - } mode; - enum { - DITHERING_DEPTH_6BPC = 0x00, - DITHERING_DEPTH_8BPC = 0x02, - DITHERING_DEPTH_AUTO - } depth; - } dither; - - struct { - int mode; /* DRM_MODE_SCALE_* */ - struct { - enum { - UNDERSCAN_OFF, - UNDERSCAN_ON, - UNDERSCAN_AUTO, - } mode; - u32 hborder; - u32 vborder; - } underscan; - bool full; - } scaler; - - struct { - int color_vibrance; - int vibrant_hue; - } procamp; - - union { - struct { - bool dither:1; - bool scaler:1; - bool procamp:1; - }; - u8 mask; - } set; -}; - void nouveau_conn_attach_properties(struct drm_connector *); void nouveau_conn_reset(struct drm_connector *); struct drm_connector_state * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6f038511a03a..53f9bceaf17a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) struct drm_connector_list_iter conn_iter; int ret; + /* + * Enable hotplug interrupts (done as early as possible, since we need + * them for MST) + */ + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + struct nouveau_connector *conn = nouveau_connector(connector); + nvif_notify_get(&conn->hpd); + } + drm_connector_list_iter_end(&conn_iter); + ret = disp->init(dev, resume, runtime); if (ret) return ret; @@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) */ drm_kms_helper_poll_enable(dev); - /* enable hotplug interrupts */ - drm_connector_list_iter_begin(dev, &conn_iter); - nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { - struct nouveau_connector *conn = nouveau_connector(connector); - nvif_notify_get(&conn->hpd); - } - drm_connector_list_iter_end(&conn_iter); - return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 668d4bd0c118..df9bf1fd1bc0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst) } struct nouveau_svmm { + struct mmu_notifier notifier; struct nouveau_vmm *vmm; struct { unsigned long start; @@ -95,9 +96,6 @@ struct nouveau_svmm { } unmanaged; struct mutex mutex; - - struct mm_struct *mm; - struct hmm_mirror mirror; }; #define SVMM_DBG(s,f,a...) 
\ @@ -251,10 +249,11 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) } static int -nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror, - const struct mmu_notifier_range *update) +nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn, + const struct mmu_notifier_range *update) { - struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror); + struct nouveau_svmm *svmm = + container_of(mn, struct nouveau_svmm, notifier); unsigned long start = update->start; unsigned long limit = update->end; @@ -264,6 +263,9 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror, SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); mutex_lock(&svmm->mutex); + if (unlikely(!svmm->vmm)) + goto out; + if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { if (start < svmm->unmanaged.start) { nouveau_svmm_invalidate(svmm, start, @@ -273,19 +275,20 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror, } nouveau_svmm_invalidate(svmm, start, limit); + +out: mutex_unlock(&svmm->mutex); return 0; } -static void -nouveau_svmm_release(struct hmm_mirror *mirror) +static void nouveau_svmm_free_notifier(struct mmu_notifier *mn) { + kfree(container_of(mn, struct nouveau_svmm, notifier)); } -static const struct hmm_mirror_ops -nouveau_svmm = { - .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables, - .release = nouveau_svmm_release, +static const struct mmu_notifier_ops nouveau_mn_ops = { + .invalidate_range_start = nouveau_svmm_invalidate_range_start, + .free_notifier = nouveau_svmm_free_notifier, }; void @@ -293,8 +296,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm) { struct nouveau_svmm *svmm = *psvmm; if (svmm) { - hmm_mirror_unregister(&svmm->mirror); - kfree(*psvmm); + mutex_lock(&svmm->mutex); + svmm->vmm = NULL; + mutex_unlock(&svmm->mutex); + mmu_notifier_put(&svmm->notifier); *psvmm = NULL; } } @@ -320,7 +325,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data, mutex_lock(&cli->mutex); if (cli->svm.cli) { ret = -EBUSY; - goto done; + goto out_free; } /* Allocate a new GPU VMM that can support SVM (managed by the @@ -335,24 +340,26 @@ nouveau_svmm_init(struct drm_device *dev, void *data, .fault_replay = true, }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm); if (ret) - goto done; - - /* Enable HMM mirroring of CPU address-space to VMM. 
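With hmm_mirror gone, the svmm is owned by its mmu_notifier, so nouveau_svmm_fini() may no longer free it directly while callbacks can still run. A condensed restatement of the handshake the hunk above introduces (names from the diff): fini marks the object dead under the same mutex the invalidate callback takes, then drops the reference; the callback bails out once vmm is NULL, and the final kfree() happens in free_notifier() after concurrent callbacks drain.

static void svmm_fini_sketch(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;

	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;	/* invalidate callbacks now no-op */
		mutex_unlock(&svmm->mutex);
		/* kfree() runs in nouveau_svmm_free_notifier() */
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;
	}
}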
*/ - svmm->mm = get_task_mm(current); - down_write(&svmm->mm->mmap_sem); - svmm->mirror.ops = &nouveau_svmm; - ret = hmm_mirror_register(&svmm->mirror, svmm->mm); - if (ret == 0) { - cli->svm.svmm = svmm; - cli->svm.cli = cli; - } - up_write(&svmm->mm->mmap_sem); - mmput(svmm->mm); + goto out_free; -done: + down_write(&current->mm->mmap_sem); + svmm->notifier.ops = &nouveau_mn_ops; + ret = __mmu_notifier_register(&svmm->notifier, current->mm); if (ret) - nouveau_svmm_fini(&svmm); + goto out_mm_unlock; + /* Note, ownership of svmm transfers to mmu_notifier */ + + cli->svm.svmm = svmm; + cli->svm.cli = cli; + up_write(&current->mm->mmap_sem); mutex_unlock(&cli->mutex); + return 0; + +out_mm_unlock: + up_write(&current->mm->mmap_sem); +out_free: + mutex_unlock(&cli->mutex); + kfree(svmm); return ret; } @@ -475,43 +482,90 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm, fault->inst, fault->addr, fault->access); } -static inline bool -nouveau_range_done(struct hmm_range *range) +struct svm_notifier { + struct mmu_interval_notifier notifier; + struct nouveau_svmm *svmm; +}; + +static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) { - bool ret = hmm_range_valid(range); + struct svm_notifier *sn = + container_of(mni, struct svm_notifier, notifier); - hmm_range_unregister(range); - return ret; + /* + * serializes the update to mni->invalidate_seq done by caller and + * prevents invalidation of the PTE from progressing while HW is being + * programmed. This is very hacky and only works because the normal + * notifier that does invalidation is always called after the range + * notifier. + */ + if (mmu_notifier_range_blockable(range)) + mutex_lock(&sn->svmm->mutex); + else if (!mutex_trylock(&sn->svmm->mutex)) + return false; + mmu_interval_set_seq(mni, cur_seq); + mutex_unlock(&sn->svmm->mutex); + return true; } -static int -nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range) +static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = { + .invalidate = nouveau_svm_range_invalidate, +}; + +static int nouveau_range_fault(struct nouveau_svmm *svmm, + struct nouveau_drm *drm, void *data, u32 size, + u64 *pfns, struct svm_notifier *notifier) { + unsigned long timeout = + jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); + /* Have HMM fault pages within the fault window to the GPU.
*/ + struct hmm_range range = { + .notifier = &notifier->notifier, + .start = notifier->notifier.interval_tree.start, + .end = notifier->notifier.interval_tree.last + 1, + .pfns = pfns, + .flags = nouveau_svm_pfn_flags, + .values = nouveau_svm_pfn_values, + .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT, + }; + struct mm_struct *mm = notifier->notifier.mm; long ret; - range->default_flags = 0; - range->pfn_flags_mask = -1UL; + while (true) { + if (time_after(jiffies, timeout)) + return -EBUSY; + + range.notifier_seq = mmu_interval_read_begin(range.notifier); + range.default_flags = 0; + range.pfn_flags_mask = -1UL; + down_read(&mm->mmap_sem); + ret = hmm_range_fault(&range, 0); + up_read(&mm->mmap_sem); + if (ret <= 0) { + if (ret == 0 || ret == -EBUSY) + continue; + return ret; + } - ret = hmm_range_register(range, &svmm->mirror); - if (ret) { - up_read(&svmm->mm->mmap_sem); - return (int)ret; + mutex_lock(&svmm->mutex); + if (mmu_interval_read_retry(range.notifier, + range.notifier_seq)) { + mutex_unlock(&svmm->mutex); + continue; + } + break; } - if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - up_read(&svmm->mm->mmap_sem); - return -EBUSY; - } + nouveau_dmem_convert_pfn(drm, &range); - ret = hmm_range_fault(range, 0); - if (ret <= 0) { - if (ret == 0) - ret = -EBUSY; - up_read(&svmm->mm->mmap_sem); - hmm_range_unregister(range); - return ret; - } - return 0; + svmm->vmm->vmm.object.client->super = true; + ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL); + svmm->vmm->vmm.object.client->super = false; + mutex_unlock(&svmm->mutex); + + return ret; } static int @@ -531,7 +585,6 @@ nouveau_svm_fault(struct nvif_notify *notify) } i; u64 phys[16]; } args; - struct hmm_range range; struct vm_area_struct *vma; u64 inst, start, limit; int fi, fn, pi, fill; @@ -587,6 +640,9 @@ nouveau_svm_fault(struct nvif_notify *notify) args.i.p.version = 0; for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) { + struct svm_notifier notifier; + struct mm_struct *mm; + /* Cancel any faults from non-SVM channels. */ if (!(svmm = buffer->fault[fi]->svmm)) { nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); @@ -606,24 +662,32 @@ nouveau_svm_fault(struct nvif_notify *notify) start = max_t(u64, start, svmm->unmanaged.limit); SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); + mm = svmm->notifier.mm; + if (!mmget_not_zero(mm)) { + nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); + continue; + } + /* Intersect fault window with the CPU VMA, cancelling * the fault if the address is invalid. */ - down_read(&svmm->mm->mmap_sem); - vma = find_vma_intersection(svmm->mm, start, limit); + down_read(&mm->mmap_sem); + vma = find_vma_intersection(mm, start, limit); if (!vma) { SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit); - up_read(&svmm->mm->mmap_sem); + up_read(&mm->mmap_sem); + mmput(mm); nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); continue; } start = max_t(u64, start, vma->vm_start); limit = min_t(u64, limit, vma->vm_end); + up_read(&mm->mmap_sem); SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); if (buffer->fault[fi]->addr != start) { SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr); - up_read(&svmm->mm->mmap_sem); + mmput(mm); nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); continue; } @@ -679,33 +743,19 @@ nouveau_svm_fault(struct nvif_notify *notify) args.i.p.addr, args.i.p.addr + args.i.p.size, fn - fi); - /* Have HMM fault pages within the fault window to the GPU.
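The loop above is the canonical mmu_interval_notifier fault pattern: snapshot the notifier sequence, fault the range with hmm_range_fault(), then revalidate the sequence under the driver lock before the result is consumed. A generic, driver-agnostic sketch of the same shape (a kernel-internal sketch for a 5.5-era tree, where mmap_sem is still the mm lock; the final page-table write is left as a comment):

#include <linux/hmm.h>
#include <linux/jiffies.h>
#include <linux/mmu_notifier.h>

static int range_fault_sketch(struct mmu_interval_notifier *mni,
			      struct hmm_range *range, struct mutex *lock)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	long ret;

	while (true) {
		if (time_after(jiffies, timeout))
			return -EBUSY;

		/* Snapshot the invalidation sequence before faulting. */
		range->notifier_seq = mmu_interval_read_begin(mni);
		down_read(&mni->mm->mmap_sem);
		ret = hmm_range_fault(range, 0);
		up_read(&mni->mm->mmap_sem);
		if (ret <= 0) {
			if (ret == 0 || ret == -EBUSY)
				continue;	/* transient; fault again */
			return ret;
		}

		/* Revalidate under the lock the invalidate callback takes. */
		mutex_lock(lock);
		if (mmu_interval_read_retry(mni, range->notifier_seq)) {
			mutex_unlock(lock);
			continue;	/* an invalidation raced; redo */
		}
		break;
	}

	/* PFNs in range->pfns are stable here: program the hardware. */
	mutex_unlock(lock);
	return 0;
}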
*/ - range.start = args.i.p.addr; - range.end = args.i.p.addr + args.i.p.size; - range.pfns = args.phys; - range.flags = nouveau_svm_pfn_flags; - range.values = nouveau_svm_pfn_values; - range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; -again: - ret = nouveau_range_fault(svmm, &range); - if (ret == 0) { - mutex_lock(&svmm->mutex); - if (!nouveau_range_done(&range)) { - mutex_unlock(&svmm->mutex); - goto again; - } - - nouveau_dmem_convert_pfn(svm->drm, &range); - - svmm->vmm->vmm.object.client->super = true; - ret = nvif_object_ioctl(&svmm->vmm->vmm.object, - &args, sizeof(args.i) + - pi * sizeof(args.phys[0]), - NULL); - svmm->vmm->vmm.object.client->super = false; - mutex_unlock(&svmm->mutex); - up_read(&svmm->mm->mmap_sem); + notifier.svmm = svmm; + ret = mmu_interval_notifier_insert(&notifier.notifier, + svmm->notifier.mm, + args.i.p.addr, args.i.p.size, + &nouveau_svm_mni_ops); + if (!ret) { + ret = nouveau_range_fault( + svmm, svm->drm, &args, + sizeof(args.i) + pi * sizeof(args.phys[0]), + args.phys, &notifier); + mmu_interval_notifier_remove(&notifier.notifier); } + mmput(mm); /* Cancel any faults in the window whose pages didn't manage * to keep their valid bit, or stay writeable when required. @@ -714,10 +764,10 @@ again: */ while (fi < fn) { struct nouveau_svm_fault *fault = buffer->fault[fi++]; - pi = (fault->addr - range.start) >> PAGE_SHIFT; + pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT; if (ret || - !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) || - (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) && + !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) || + (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) && fault->access != 0 && fault->access != 3)) { nouveau_svm_fault_cancel_fault(svm, fault); continue; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index e518d93ca6df..d08ae95ecc0a 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -843,9 +843,13 @@ fail: */ static void omap_gem_unpin_locked(struct drm_gem_object *obj) { + struct omap_drm_private *priv = obj->dev->dev_private; struct omap_gem_object *omap_obj = to_omap_bo(obj); int ret; + if (omap_gem_is_contiguous(omap_obj) || !priv->has_dmm) + return; + if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { ret = tiler_unpin(omap_obj->block); if (ret) { diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index fe8ee77c96e4..413987038fbf 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -19,15 +19,18 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev); static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct panfrost_device *pfdev = dev_get_drvdata(dev); + struct dev_pm_opp *opp; int err; + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) + return PTR_ERR(opp); + dev_pm_opp_put(opp); + err = dev_pm_opp_set_rate(dev, *freq); if (err) return err; - *freq = clk_get_rate(pfdev->clock); - return 0; } @@ -61,20 +64,10 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, return 0; } -static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) -{ - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - - *freq = clk_get_rate(pfdev->clock); - - return 0; -} - static struct devfreq_dev_profile panfrost_devfreq_profile = { .polling_ms = 50, /* ~3 frames */ .target = panfrost_devfreq_target, .get_dev_status =
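The panfrost_devfreq_target() rewrite above is the standard OPP-backed devfreq pattern: validate the requested frequency against the OPP table (which also rounds *freq to a real operating point), then let the OPP core program the clock. That is also why get_cur_freq can be dropped; the callback is optional and the devfreq core falls back to its own bookkeeping. Isolated as a sketch:

#include <linux/devfreq.h>
#include <linux/pm_opp.h>

static int devfreq_target_sketch(struct device *dev, unsigned long *freq,
				 u32 flags)
{
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);	/* only needed to round *freq */

	return dev_pm_opp_set_rate(dev, *freq);
}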
panfrost_devfreq_get_dev_status, - .get_cur_freq = panfrost_devfreq_get_cur_freq, }; int panfrost_devfreq_init(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index f8a8aca9c658..48e3c4165247 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -303,14 +303,17 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data, } /* Don't allow mmapping of heap objects as pages are not pinned. */ - if (to_panfrost_bo(gem_obj)->is_heap) - return -EINVAL; + if (to_panfrost_bo(gem_obj)->is_heap) { + ret = -EINVAL; + goto out; + } ret = drm_gem_create_mmap_offset(gem_obj); if (ret == 0) args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_put_unlocked(gem_obj); +out: + drm_gem_object_put_unlocked(gem_obj); return ret; } @@ -347,20 +350,19 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, return -ENOENT; } + mutex_lock(&pfdev->shrinker_lock); args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); if (args->retained) { struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj); - mutex_lock(&pfdev->shrinker_lock); - if (args->madv == PANFROST_MADV_DONTNEED) - list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list); + list_add_tail(&bo->base.madv_list, + &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); - - mutex_unlock(&pfdev->shrinker_lock); } + mutex_unlock(&pfdev->shrinker_lock); drm_gem_object_put_unlocked(gem_obj); return 0; @@ -443,7 +445,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) { struct panfrost_file_priv *panfrost_priv = file->driver_priv; - panfrost_perfcnt_close(panfrost_priv); + panfrost_perfcnt_close(file); panfrost_job_close(panfrost_priv); panfrost_mmu_pgtable_free(panfrost_priv); @@ -552,11 +554,11 @@ static int panfrost_probe(struct platform_device *pdev) return 0; err_out2: + pm_runtime_disable(pfdev->dev); panfrost_devfreq_fini(pfdev); err_out1: panfrost_device_fini(pfdev); err_out0: - pm_runtime_disable(pfdev->dev); drm_dev_put(ddev); return err; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index deca0c30bbd4..fd766b1395fb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -19,6 +19,16 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_device *pfdev = obj->dev->dev_private; + /* + * Make sure the BO is no longer inserted in the shrinker list before + * taking care of the destruction itself. If we don't do that we have a + * race condition between this function and what's done in + * panfrost_gem_shrinker_scan(). 
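The madvise hunk above widens pfdev->shrinker_lock so that the madvise state change and the shrinker-list update form one critical section; previously the shrinker could observe a DONTNEED object that was not yet on its list, or the reverse. The shape of the fix as a self-contained sketch (the struct and the int-valued madv convention here are illustrative, not panfrost's exact types):

#include <linux/list.h>
#include <linux/mutex.h>

struct sketch_bo {
	int madv;			/* nonzero: purgeable */
	struct list_head madv_list;
};

static void madvise_sketch(struct sketch_bo *bo, int madv,
			   struct mutex *shrinker_lock,
			   struct list_head *shrinker_list)
{
	mutex_lock(shrinker_lock);
	bo->madv = madv;			/* state change ... */
	if (madv)
		list_add_tail(&bo->madv_list, shrinker_list);
	else
		list_del_init(&bo->madv_list);	/* ... and list membership,
						 * updated atomically */
	mutex_unlock(shrinker_lock);
}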
+ */ + mutex_lock(&pfdev->shrinker_lock); + list_del_init(&bo->base.madv_list); + mutex_unlock(&pfdev->shrinker_lock); + if (bo->sgts) { int i; int n_sgt = bo->base.base.size / SZ_2M; @@ -33,15 +43,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) kfree(bo->sgts); } - mutex_lock(&pfdev->shrinker_lock); - if (!list_empty(&bo->base.madv_list)) - list_del(&bo->base.madv_list); - mutex_unlock(&pfdev->shrinker_lock); - drm_gem_shmem_free_object(obj); } -static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) { int ret; size_t size = obj->size; @@ -80,7 +85,7 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p return ret; } -static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) +void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) { struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_file_priv *priv = file_priv->driver_priv; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 50920819cc16..4b17e7308764 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -45,6 +45,10 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv, u32 flags, uint32_t *handle); +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); +void panfrost_gem_close(struct drm_gem_object *obj, + struct drm_file *file_priv); + void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index bdd990568476..a3ed64a1f15e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size) return SZ_2M; } -void panfrost_mmu_flush_range(struct panfrost_device *pfdev, - struct panfrost_mmu *mmu, - u64 iova, size_t size) +static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, + struct panfrost_mmu *mmu, + u64 iova, size_t size) { if (mmu->as < 0) return; @@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) spin_lock(&pfdev->as_lock); list_for_each_entry(mmu, &pfdev->as_lru_list, list) { if (as == mmu->as) - break; + goto found_mmu; } - if (as != mmu->as) - goto out; + goto out; +found_mmu: priv = container_of(mmu, struct panfrost_file_priv, mmu); spin_lock(&priv->mm_lock); @@ -432,7 +432,8 @@ out: #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) -int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) +static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, + u64 addr) { int ret, i; struct panfrost_gem_object *bo; diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 83c57d325ca8..2c04e858c50a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -16,6 +16,7 @@ #include "panfrost_issues.h" #include "panfrost_job.h" #include "panfrost_mmu.h" +#include "panfrost_perfcnt.h" #include "panfrost_regs.h" #define COUNTERS_PER_BLOCK 64 @@ -66,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) } static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, - struct panfrost_file_priv *user, + struct 
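The free_object hunk above is the destruction side of the same race: unlink from the shrinker list, under shrinker_lock, before any teardown begins. Because the list head is initialized at object creation, list_del_init() is safe even if the BO was never added, which is why the old list_empty() check disappears. Schematically, reusing the sketch_bo type from the previous sketch:

static void free_object_sketch(struct sketch_bo *bo,
			       struct mutex *shrinker_lock)
{
	mutex_lock(shrinker_lock);
	list_del_init(&bo->madv_list);	/* no-op if never listed */
	mutex_unlock(shrinker_lock);

	/* Only now is it safe to tear the object down. */
}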
drm_file *file_priv, unsigned int counterset) { + struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_gem_shmem_object *bo; u32 cfg; @@ -90,14 +92,14 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, perfcnt->bo = to_panfrost_bo(&bo->base); /* Map the perfcnt buf in the address space attached to file_priv. */ - ret = panfrost_mmu_map(perfcnt->bo); + ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv); if (ret) goto err_put_bo; perfcnt->buf = drm_gem_shmem_vmap(&bo->base); if (IS_ERR(perfcnt->buf)) { ret = PTR_ERR(perfcnt->buf); - goto err_put_bo; + goto err_close_bo; } /* @@ -156,14 +158,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, err_vunmap: drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); +err_close_bo: + panfrost_gem_close(&perfcnt->bo->base.base, file_priv); err_put_bo: drm_gem_object_put_unlocked(&bo->base); return ret; } static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, - struct panfrost_file_priv *user) + struct drm_file *file_priv) { + struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; if (user != perfcnt->user) @@ -179,6 +184,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, perfcnt->user = NULL; drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); perfcnt->buf = NULL; + panfrost_gem_close(&perfcnt->bo->base.base, file_priv); drm_gem_object_put_unlocked(&perfcnt->bo->base.base); perfcnt->bo = NULL; pm_runtime_mark_last_busy(pfdev->dev); @@ -190,7 +196,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_file_priv *pfile = file_priv->driver_priv; struct panfrost_device *pfdev = dev->dev_private; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_enable *req = data; @@ -206,10 +211,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, mutex_lock(&perfcnt->lock); if (req->enable) - ret = panfrost_perfcnt_enable_locked(pfdev, pfile, + ret = panfrost_perfcnt_enable_locked(pfdev, file_priv, req->counterset); else - ret = panfrost_perfcnt_disable_locked(pfdev, pfile); + ret = panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); return ret; @@ -247,15 +252,16 @@ out: return ret; } -void panfrost_perfcnt_close(struct panfrost_file_priv *pfile) +void panfrost_perfcnt_close(struct drm_file *file_priv) { + struct panfrost_file_priv *pfile = file_priv->driver_priv; struct panfrost_device *pfdev = pfile->pfdev; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; pm_runtime_get_sync(pfdev->dev); mutex_lock(&perfcnt->lock); if (perfcnt->user == pfile) - panfrost_perfcnt_disable_locked(pfdev, pfile); + panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h index 13b8fdaa1b43..8bbcf5f5fb33 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h @@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev); void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev); int panfrost_perfcnt_init(struct panfrost_device *pfdev); void 
panfrost_perfcnt_fini(struct panfrost_device *pfdev); -void panfrost_perfcnt_close(struct panfrost_file_priv *pfile); +void panfrost_perfcnt_close(struct drm_file *file_priv); int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv); int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 6f38375c77c8..911735f8d5de 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -412,7 +412,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *dig_connector; int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); u8 tmp; @@ -423,8 +422,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, if (!radeon_connector->con_priv) return panel_mode; - dig_connector = radeon_connector->con_priv; - if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 8ff518eefff2..4fa488cedd55 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev) else actual_temp = temp & 0x1ff; - actual_temp = actual_temp * 1000; - - return actual_temp; + return actual_temp * 1000; } /* get temperature in millidegrees */ @@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev) else actual_temp = 0; - actual_temp = actual_temp * 1000; - - return actual_temp; + return actual_temp * 1000; } /* @@ -6969,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* XXX this should actually be a bus address, not an MC address. 
same on older asics */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN @@ -9504,7 +9500,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) { struct pci_dev *root = rdev->pdev->bus->self; enum pci_bus_speed speed_cap; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -9546,12 +9541,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(rdev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev)) return; if (speed_cap == PCIE_SPEED_8_0GT) { @@ -9561,14 +9551,17 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL, + &gpu_cfg); tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16); tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1); max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; @@ -9586,15 +9579,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) for (i = 0; i < 10; i++) { /* check status */ - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp |= LC_SET_QUIESCE; @@ -9607,26 +9608,45 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) msleep(100); /* linkctl */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, + tmp16); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + 
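This hunk (and the matching si.c conversion further down) is mechanical: the pcie_capability_read_word()/pcie_capability_write_word() helpers look up the PCIe capability offset internally, so the hand-rolled pci_pcie_cap() bookkeeping collapses into a pci_is_pcie() check. The HAWD toggle above, isolated as a sketch:

#include <linux/pci.h>

static void set_hw_autonomous_width_sketch(struct pci_dev *pdev, bool enable)
{
	u16 lnkctl;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
	if (enable)
		lnkctl |= PCI_EXP_LNKCTL_HAWD;
	else
		lnkctl &= ~PCI_EXP_LNKCTL_HAWD;
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
}

The same series of hunks also replaces the magic (1 << 4) | (7 << 9) with the symbolic PCI_EXP_LNKCTL2_ENTER_COMP and PCI_EXP_LNKCTL2_TX_MARGIN names, which is purely a readability change.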
pcie_capability_write_word(rdev->pdev, + PCI_EXP_LNKCTL, + tmp16); /* linkctl2 */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(rdev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -9640,15 +9660,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; if (speed_cap == PCIE_SPEED_8_0GT) - tmp16 |= 3; /* gen3 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (speed_cap == PCIE_SPEED_5_0GT) - tmp16 |= 2; /* gen2 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; /* gen1 */ - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 0017aa7a9f17..29c966f3407e 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1826,8 +1826,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; - track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); - track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); + track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); + track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); } if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) track->textures[i].tex_coord_type = 2; diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 840401413c58..f5f2ffea5ab2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = 1; } else { track->textures[i].use_pitch = 0; - track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); - track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); + track->textures[i].width = 1 << 
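The r100/r200 texture-size change above is a genuine bug fix, not refactoring: RADEON_TXFORMAT_WIDTH_MASK and its siblings are in-place field masks, so the register value must be masked before it is shifted. Demonstrated with hypothetical field parameters (the real ones live in radeon_reg.h):

#include <linux/types.h>

#define FIELD_MASK	0x00000f00	/* bits [11:8], already in place */
#define FIELD_SHIFT	8

static unsigned int field_get_sketch(u32 reg)
{
	/* Correct: isolate the field, then align it to bit 0. */
	return (reg & FIELD_MASK) >> FIELD_SHIFT;
	/*
	 * The old form, (reg >> FIELD_SHIFT) & FIELD_MASK, shifts first
	 * and then applies the still-shifted mask, so it actually reads
	 * bits [19:16], a neighbouring field, instead of the width.
	 */
}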
((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); + track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); } if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE) track->textures[i].lookup_disable = true; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0d453aa09352..d2e51a9433f5 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* set dummy read address to ring address */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index d6c28a5d77ab..49e8266461f8 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -350,7 +350,7 @@ static void r600_cs_track_init(struct r600_cs_track *track) static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) { struct r600_cs_track *track = p->track; - u32 slice_tile_max, size, tmp; + u32 slice_tile_max, tmp; u32 height, height_align, pitch, pitch_align, depth_align; u64 base_offset, base_align; struct array_mode_checker array_check; @@ -360,7 +360,6 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* When resolve is used, the second colorbuffer has always 1 sample. */ unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; - size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; format = G_0280A0_FORMAT(track->cb_color_info[i]); if (!r600_fmt_is_valid_color(format)) { dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", @@ -517,7 +516,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) static int r600_cs_track_validate_db(struct radeon_cs_parser *p) { struct r600_cs_track *track = p->track; - u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; + u32 nviews, bpe, ntiles, slice_tile_max, tmp; u32 height_align, pitch_align, depth_align; u32 pitch = 8192; u32 height = 8192; @@ -564,7 +563,6 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) } ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); } else { - size = radeon_bo_size(track->db_bo); /* pitch in pixels */ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; @@ -2342,7 +2340,6 @@ int r600_cs_parse(struct radeon_cs_parser *p) int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, struct radeon_bo_list **cs_reloc) { - struct radeon_cs_chunk *relocs_chunk; unsigned idx; *cs_reloc = NULL; @@ -2350,7 +2347,6 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } - relocs_chunk = p->chunk_relocs; idx = p->dma_reloc_idx; if (idx >= p->nrelocs) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d59b004f6695..30e32adc1fc6 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -68,6 +68,10 @@ #include <linux/hashtable.h> #include 
<linux/dma-fence.h> +#ifdef CONFIG_MMU_NOTIFIER +#include <linux/mmu_notifier.h> +#endif + #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> @@ -509,8 +513,9 @@ struct radeon_bo { struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; - struct radeon_mn *mn; - struct list_head mn_list; +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_interval_notifier notifier; +#endif }; #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base) diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index b9aea5776d3d..8c63ccb8b623 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -288,7 +288,7 @@ static void radeon_audio_interface_init(struct radeon_device *rdev) } else { rdev->audio.funcs = &r600_funcs; rdev->audio.hdmi_funcs = &r600_hdmi_funcs; - rdev->audio.dp_funcs = 0; + rdev->audio.dp_funcs = NULL; } } @@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) return; sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 628a7a499186..c3e49c973812 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2639,7 +2639,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; u16 offset, misc, misc2 = 0; - u8 rev, blocks, tmp; + u8 rev, tmp; int state_index = 0; struct radeon_i2c_bus_rec i2c_bus; @@ -2732,7 +2732,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); if (offset) { rev = RBIOS8(offset); - blocks = RBIOS8(offset + 0x2); /* power mode 0 tends to be the only valid one */ rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index dfb921899853..962575e27cde 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1687,7 +1687,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_encoder *radeon_encoder; struct drm_connector *connector; - struct radeon_connector *radeon_connector; bool first = true; u32 src_v = 1, dst_v = 1; u32 src_h = 1, dst_h = 1; @@ -1700,7 +1699,6 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, continue; radeon_encoder = to_radeon_encoder(encoder); connector = radeon_get_connector_for_encoder(encoder); - radeon_connector = to_radeon_connector(connector); if (first) { /* set scaling */ diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2994f07fbad9..ee28f5b3785e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) return &radeon_connector->mst_encoder->base; } +static int +radeon_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct radeon_connector 
*radeon_connector = + to_radeon_connector(connector); + struct radeon_connector *master = radeon_connector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + radeon_connector->port); +} + static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = { .get_modes = radeon_dp_mst_get_modes, .mode_valid = radeon_dp_mst_mode_valid, .best_encoder = radeon_mst_best_encoder, + .detect_ctx = radeon_dp_mst_detect, }; -static enum drm_connector_status -radeon_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector *master = radeon_connector->mst_port; - - return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port); -} - static void radeon_dp_mst_connector_destroy(struct drm_connector *connector) { @@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector) static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { .dpms = drm_helper_connector_dpms, - .detect = radeon_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_dp_mst_connector_destroy, }; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 888e0f384c61..fd74e2611185 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -384,6 +384,17 @@ radeon_pci_shutdown(struct pci_dev *pdev) */ if (radeon_device_is_virtual()) radeon_pci_remove(pdev); + +#ifdef CONFIG_PPC64 + /* + * Some adapters need to be suspended before a + * shutdown occurs in order to prevent an error + * during kexec. + * Make this power-specific because it breaks + * some non-power boards. + */ + radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false); +#endif } static int radeon_pmops_suspend(struct device *dev) diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index f132eec737ad..d9df7f311e76 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c @@ -537,7 +537,7 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl; uint32_t tv_modulator_cntl1, tv_modulator_cntl2; uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2; - uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal; + uint32_t tv_pll_cntl, tv_ftotal; uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl; uint32_t m, n, p; const uint16_t *hor_timing; @@ -709,12 +709,6 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, (((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) | ((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT); - tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) | - ((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) | - ((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) | - RADEON_TVCLK_SRC_SEL_TVPLL | - RADEON_TVPLL_TEST_DIS); - tv_dac->tv.tv_uv_adr = 0xc8; if (tv_dac->tv_std == TV_STD_NTSC || diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index dbab9a3a969b..f93829f08a4d 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -36,131 +36,51 @@ #include "radeon.h" -struct radeon_mn { - struct mmu_notifier mn; - - /* objects protected by lock */ - struct mutex lock; - struct rb_root_cached objects; -}; - -struct radeon_mn_node { - struct interval_tree_node it; - struct list_head bos; -}; - /** - * radeon_mn_invalidate_range_start -
callback to notify about mm change + * radeon_mn_invalidate - callback to notify about mm change * * @mn: our notifier - * @mn: the mm this callback is about - * @start: start of updated range - * @end: end of updated range + * @range: the VMA under invalidation * * We block for all BOs between start and end to be idle and * unmap them by move them into system domain again. */ -static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn, - const struct mmu_notifier_range *range) +static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, + const struct mmu_notifier_range *range, + unsigned long cur_seq) { - struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier); struct ttm_operation_ctx ctx = { false, false }; - struct interval_tree_node *it; - unsigned long end; - int ret = 0; - - /* notification is exclusive, but interval is inclusive */ - end = range->end - 1; - - /* TODO we should be able to split locking for interval tree and - * the tear down. - */ - if (mmu_notifier_range_blockable(range)) - mutex_lock(&rmn->lock); - else if (!mutex_trylock(&rmn->lock)) - return -EAGAIN; - - it = interval_tree_iter_first(&rmn->objects, range->start, end); - while (it) { - struct radeon_mn_node *node; - struct radeon_bo *bo; - long r; - - if (!mmu_notifier_range_blockable(range)) { - ret = -EAGAIN; - goto out_unlock; - } - - node = container_of(it, struct radeon_mn_node, it); - it = interval_tree_iter_next(it, range->start, end); + long r; - list_for_each_entry(bo, &node->bos, mn_list) { + if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + return true; - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) - continue; + if (!mmu_notifier_range_blockable(range)) + return false; - r = radeon_bo_reserve(bo, true); - if (r) { - DRM_ERROR("(%ld) failed to reserve user bo\n", r); - continue; - } - - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, - true, false, MAX_SCHEDULE_TIMEOUT); - if (r <= 0) - DRM_ERROR("(%ld) failed to wait for user bo\n", r); - - radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) - DRM_ERROR("(%ld) failed to validate user bo\n", r); - - radeon_bo_unreserve(bo); - } + r = radeon_bo_reserve(bo, true); + if (r) { + DRM_ERROR("(%ld) failed to reserve user bo\n", r); + return true; } - -out_unlock: - mutex_unlock(&rmn->lock); - - return ret; -} - -static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm) -{ - struct mmu_notifier_range range = { - .mm = mm, - .start = 0, - .end = ULONG_MAX, - .flags = 0, - .event = MMU_NOTIFY_UNMAP, - }; - - radeon_mn_invalidate_range_start(mn, &range); -} - -static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm) -{ - struct radeon_mn *rmn; - rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); - if (!rmn) - return ERR_PTR(-ENOMEM); + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); - mutex_init(&rmn->lock); - rmn->objects = RB_ROOT_CACHED; - return &rmn->mn; -} + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) + DRM_ERROR("(%ld) failed to validate user bo\n", r); -static void radeon_mn_free_notifier(struct mmu_notifier *mn) -{ - kfree(container_of(mn, struct radeon_mn, mn)); + radeon_bo_unreserve(bo); + return true; } -static const struct mmu_notifier_ops radeon_mn_ops = { - 
.release = radeon_mn_release, - .invalidate_range_start = radeon_mn_invalidate_range_start, - .alloc_notifier = radeon_mn_alloc_notifier, - .free_notifier = radeon_mn_free_notifier, +static const struct mmu_interval_notifier_ops radeon_mn_ops = { + .invalidate = radeon_mn_invalidate, }; /** @@ -174,51 +94,20 @@ static const struct mmu_notifier_ops radeon_mn_ops = { */ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) { - unsigned long end = addr + radeon_bo_size(bo) - 1; - struct mmu_notifier *mn; - struct radeon_mn *rmn; - struct radeon_mn_node *node = NULL; - struct list_head bos; - struct interval_tree_node *it; - - mn = mmu_notifier_get(&radeon_mn_ops, current->mm); - if (IS_ERR(mn)) - return PTR_ERR(mn); - rmn = container_of(mn, struct radeon_mn, mn); - - INIT_LIST_HEAD(&bos); - - mutex_lock(&rmn->lock); - - while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) { - kfree(node); - node = container_of(it, struct radeon_mn_node, it); - interval_tree_remove(&node->it, &rmn->objects); - addr = min(it->start, addr); - end = max(it->last, end); - list_splice(&node->bos, &bos); - } - - if (!node) { - node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL); - if (!node) { - mutex_unlock(&rmn->lock); - return -ENOMEM; - } - } - - bo->mn = rmn; - - node->it.start = addr; - node->it.last = end; - INIT_LIST_HEAD(&node->bos); - list_splice(&bos, &node->bos); - list_add(&bo->mn_list, &node->bos); - - interval_tree_insert(&node->it, &rmn->objects); - - mutex_unlock(&rmn->lock); - + int ret; + + ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, + radeon_bo_size(bo), &radeon_mn_ops); + if (ret) + return ret; + + /* + * FIXME: radeon appears to allow get_user_pages to run during + * invalidate_range_start/end, which is not a safe way to read the + * PTEs. It should use the mmu_interval_read_begin() scheme around the + * get_user_pages to ensure that the PTEs are read properly + */ + mmu_interval_read_begin(&bo->notifier); return 0; } @@ -231,27 +120,8 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) */ void radeon_mn_unregister(struct radeon_bo *bo) { - struct radeon_mn *rmn = bo->mn; - struct list_head *head; - - if (!rmn) + if (!bo->notifier.mm) return; - - mutex_lock(&rmn->lock); - /* save the next list entry for later */ - head = bo->mn_list.next; - - list_del(&bo->mn_list); - - if (list_empty(head)) { - struct radeon_mn_node *node; - node = container_of(head, struct radeon_mn_node, bos); - interval_tree_remove(&node->it, &rmn->objects); - kfree(node); - } - - mutex_unlock(&rmn->lock); - - mmu_notifier_put(&rmn->mn); - bo->mn = NULL; + mmu_interval_notifier_remove(&bo->notifier); + bo->notifier.mm = NULL; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 294a44525302..8788a0564582 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev) /* XXX what about 12? 
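After the conversion above, radeon keeps exactly one mmu_interval_notifier embedded per userptr BO, with notifier.mm doubling as the 'is registered' flag, and the whole per-mm interval-tree layer disappears. The remaining lifetime, reduced to its API calls (a sketch; mn_sketch_bo is illustrative, not the radeon type):

#include <linux/mmu_notifier.h>
#include <linux/sched.h>

struct mn_sketch_bo {
	struct mmu_interval_notifier notifier;
};

static int mn_register_sketch(struct mn_sketch_bo *bo, unsigned long addr,
			      unsigned long size,
			      const struct mmu_interval_notifier_ops *ops)
{
	/* Covers exactly [addr, addr + size) in the caller's mm. */
	return mmu_interval_notifier_insert(&bo->notifier, current->mm,
					    addr, size, ops);
}

static void mn_unregister_sketch(struct mn_sketch_bo *bo)
{
	if (!bo->notifier.mm)	/* never registered */
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}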
*/ rdev->config.si.tile_config |= (3 << 0); break; - } + } switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; @@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* set dummy read address to ring address */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN @@ -7087,7 +7087,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) { struct pci_dev *root = rdev->pdev->bus->self; enum pci_bus_speed speed_cap; - int bridge_pos, gpu_pos; u32 speed_cntl, current_data_rate; int i; u16 tmp16; @@ -7129,12 +7128,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); } - bridge_pos = pci_pcie_cap(root); - if (!bridge_pos) - return; - - gpu_pos = pci_pcie_cap(rdev->pdev); - if (!gpu_pos) + if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev)) return; if (speed_cap == PCIE_SPEED_8_0GT) { @@ -7144,14 +7138,17 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) u16 bridge_cfg2, gpu_cfg2; u32 max_lw, current_lw, tmp; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL, + &gpu_cfg); tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16); tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL, + tmp16); tmp = RREG32_PCIE(PCIE_LC_STATUS1); max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; @@ -7169,15 +7166,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) for (i = 0; i < 10; i++) { /* check status */ - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_DEVSTA, + &tmp16); if (tmp16 & PCI_EXP_DEVSTA_TRPND) break; - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &bridge_cfg); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL, + &gpu_cfg); - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &bridge_cfg2); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &gpu_cfg2); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp |= LC_SET_QUIESCE; @@ -7190,26 +7195,46 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) msleep(100); /* linkctl */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(root, + 
PCI_EXP_LNKCTL, + tmp16); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL, + &tmp16); tmp16 &= ~PCI_EXP_LNKCTL_HAWD; tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + pcie_capability_write_word(rdev->pdev, + PCI_EXP_LNKCTL, + tmp16); /* linkctl2 */ - pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); - - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~((1 << 4) | (7 << 9)); - tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + pcie_capability_read_word(root, PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(root, + PCI_EXP_LNKCTL2, + tmp16); + + pcie_capability_read_word(rdev->pdev, + PCI_EXP_LNKCTL2, + &tmp16); + tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN); + tmp16 |= (gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_write_word(rdev->pdev, + PCI_EXP_LNKCTL2, + tmp16); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -7223,15 +7248,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~0xf; + pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL2_TLS; if (speed_cap == PCIE_SPEED_8_0GT) - tmp16 |= 3; /* gen3 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (speed_cap == PCIE_SPEED_5_0GT) - tmp16 |= 2; /* gen2 */ + tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else - tmp16 |= 1; /* gen1 */ - pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ + pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index d0ebf72ba5ff..05e8b4d0af3f 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1957,6 +1957,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: @@ -3638,14 +3639,13 @@ static int si_notify_smc_display_change(struct radeon_device *rdev, static void si_program_response_times(struct radeon_device *rdev) { - u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out; + u32 voltage_response_time, acpi_delay_time, vbi_time_out; u32 vddc_dly, acpi_dly, vbi_dly; u32 reference_clock; si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time; - backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time; if (voltage_response_time == 0) voltage_response_time = 1000; @@ -5898,7 +5898,7 @@ static int 
si_patch_single_dependency_table_based_on_leakage(struct radeon_devic static int si_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev) { - int ret = 0; + int ret; ret = si_patch_single_dependency_table_based_on_leakage(rdev, &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk); diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 1529849e217e..0919f1f159a4 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -4,6 +4,7 @@ config DRM_RCAR_DU depends on DRM && OF depends on ARM || ARM64 depends on ARCH_RENESAS || COMPILE_TEST + imply DRM_RCAR_CMM imply DRM_RCAR_LVDS select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER @@ -13,6 +14,13 @@ config DRM_RCAR_DU Choose this option if you have an R-Car chipset. If M is selected the module will be called rcar-du-drm. +config DRM_RCAR_CMM + tristate "R-Car DU Color Management Module (CMM) Support" + depends on DRM && OF + depends on DRM_RCAR_DU + help + Enable support for R-Car Color Management Module (CMM). + config DRM_RCAR_DW_HDMI tristate "R-Car DU Gen3 HDMI Encoder Support" depends on DRM && OF diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 6c2ed9c46467..4d1187ccc3e5 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -15,6 +15,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \ rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o +obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c new file mode 100644 index 000000000000..c578095b09a5 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * rcar_cmm.c -- R-Car Display Unit Color Management Module + * + * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org> + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include <drm/drm_color_mgmt.h> + +#include "rcar_cmm.h" + +#define CM2_LUT_CTRL 0x0000 +#define CM2_LUT_CTRL_LUT_EN BIT(0) +#define CM2_LUT_TBL_BASE 0x0600 +#define CM2_LUT_TBL(__i) (CM2_LUT_TBL_BASE + (__i) * 4) + +struct rcar_cmm { + void __iomem *base; + + /* + * @lut: 1D-LUT state + * @lut.enabled: 1D-LUT enabled flag + */ + struct { + bool enabled; + } lut; +}; + +static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg) +{ + return ioread32(rcmm->base + reg); +} + +static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data) +{ + iowrite32(data, rcmm->base + reg); +} + +/* + * rcar_cmm_lut_write() - Scale the DRM LUT table entries to hardware precision + * and write to the CMM registers + * @rcmm: Pointer to the CMM device + * @drm_lut: Pointer to the DRM LUT table + */ +static void rcar_cmm_lut_write(struct rcar_cmm *rcmm, + const struct drm_color_lut *drm_lut) +{ + unsigned int i; + + for (i = 0; i < CM2_LUT_SIZE; ++i) { + u32 entry = drm_color_lut_extract(drm_lut[i].red, 8) << 16 + | drm_color_lut_extract(drm_lut[i].green, 8) << 8 + | drm_color_lut_extract(drm_lut[i].blue, 8); + + rcar_cmm_write(rcmm, CM2_LUT_TBL(i), entry); + } +} + +/* + * rcar_cmm_setup() - Configure the CMM unit + * @pdev: The platform device associated with the CMM instance + * @config: The CMM unit 
configuration
+ *
+ * Configure the CMM unit with the given configuration. Currently enabling,
+ * disabling and programming of the 1-D LUT unit is supported.
+ *
+ * As rcar_cmm_setup() accesses the CMM registers the unit should be powered
+ * and its functional clock enabled. To guarantee this, before any call to
+ * this function is made, the CMM unit has to be enabled by calling
+ * rcar_cmm_enable() first.
+ *
+ * TODO: Add support for LUT double buffer operations to avoid updating the
+ * LUT table entries while a frame is being displayed.
+ */
+int rcar_cmm_setup(struct platform_device *pdev,
+		   const struct rcar_cmm_config *config)
+{
+	struct rcar_cmm *rcmm = platform_get_drvdata(pdev);
+
+	/* Disable LUT if no table is provided. */
+	if (!config->lut.table) {
+		if (rcmm->lut.enabled) {
+			rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0);
+			rcmm->lut.enabled = false;
+		}
+
+		return 0;
+	}
+
+	/* Enable LUT and program the new gamma table values. */
+	if (!rcmm->lut.enabled) {
+		rcar_cmm_write(rcmm, CM2_LUT_CTRL, CM2_LUT_CTRL_LUT_EN);
+		rcmm->lut.enabled = true;
+	}
+
+	rcar_cmm_lut_write(rcmm, config->lut.table);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_setup);
+
+/*
+ * rcar_cmm_enable() - Enable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * When the output of the corresponding DU channel is routed to the CMM unit,
+ * the unit shall be enabled before the DU channel is started, and remain
+ * enabled until the channel is stopped. The CMM unit shall be disabled with
+ * rcar_cmm_disable().
+ *
+ * Calls to rcar_cmm_enable() and rcar_cmm_disable() are not reference-counted.
+ * It is an error to attempt to enable an already enabled CMM unit, or to
+ * attempt to disable a disabled unit.
+ */
+int rcar_cmm_enable(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_cmm_enable);
+
+/*
+ * rcar_cmm_disable() - Disable the CMM unit
+ * @pdev: The platform device associated with the CMM instance
+ *
+ * See rcar_cmm_enable() for usage information.
+ *
+ * Disabling the CMM unit disables all the internal processing blocks. The CMM
+ * state shall thus be restored with rcar_cmm_setup() when re-enabling the CMM
+ * unit after the next rcar_cmm_enable() call.
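+ *
+ * A minimal sketch of the expected call sequence from the DU driver, given
+ * the ordering rules above (error handling omitted; pdev and config are
+ * assumed to have been set up by the caller):
+ *
+ *	rcar_cmm_enable(pdev);
+ *	... start the DU channel ...
+ *	rcar_cmm_setup(pdev, &config);
+ *	... stop the DU channel ...
+ *	rcar_cmm_disable(pdev);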
+ */ +void rcar_cmm_disable(struct platform_device *pdev) +{ + struct rcar_cmm *rcmm = platform_get_drvdata(pdev); + + rcar_cmm_write(rcmm, CM2_LUT_CTRL, 0); + rcmm->lut.enabled = false; + + pm_runtime_put(&pdev->dev); +} +EXPORT_SYMBOL_GPL(rcar_cmm_disable); + +/* + * rcar_cmm_init() - Initialize the CMM unit + * @pdev: The platform device associated with the CMM instance + * + * Return: 0 on success, -EPROBE_DEFER if the CMM is not available yet, + * -ENODEV if the DRM_RCAR_CMM config option is disabled + */ +int rcar_cmm_init(struct platform_device *pdev) +{ + struct rcar_cmm *rcmm = platform_get_drvdata(pdev); + + if (!rcmm) + return -EPROBE_DEFER; + + return 0; +} +EXPORT_SYMBOL_GPL(rcar_cmm_init); + +static int rcar_cmm_probe(struct platform_device *pdev) +{ + struct rcar_cmm *rcmm; + + rcmm = devm_kzalloc(&pdev->dev, sizeof(*rcmm), GFP_KERNEL); + if (!rcmm) + return -ENOMEM; + platform_set_drvdata(pdev, rcmm); + + rcmm->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rcmm->base)) + return PTR_ERR(rcmm->base); + + pm_runtime_enable(&pdev->dev); + + return 0; +} + +static int rcar_cmm_remove(struct platform_device *pdev) +{ + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id rcar_cmm_of_table[] = { + { .compatible = "renesas,rcar-gen3-cmm", }, + { .compatible = "renesas,rcar-gen2-cmm", }, + { }, +}; +MODULE_DEVICE_TABLE(of, rcar_cmm_of_table); + +static struct platform_driver rcar_cmm_platform_driver = { + .probe = rcar_cmm_probe, + .remove = rcar_cmm_remove, + .driver = { + .name = "rcar-cmm", + .of_match_table = rcar_cmm_of_table, + }, +}; + +module_platform_driver(rcar_cmm_platform_driver); + +MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>"); +MODULE_DESCRIPTION("Renesas R-Car CMM Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.h b/drivers/gpu/drm/rcar-du/rcar_cmm.h new file mode 100644 index 000000000000..b5f7ec6db04a --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_cmm.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rcar_cmm.h -- R-Car Display Unit Color Management Module + * + * Copyright (C) 2019 Jacopo Mondi <jacopo+renesas@jmondi.org> + */ + +#ifndef __RCAR_CMM_H__ +#define __RCAR_CMM_H__ + +#define CM2_LUT_SIZE 256 + +struct drm_color_lut; +struct platform_device; + +/** + * struct rcar_cmm_config - CMM configuration + * + * @lut: 1D-LUT configuration + * @lut.table: 1D-LUT table entries. 
Disable LUT operations when NULL + */ +struct rcar_cmm_config { + struct { + struct drm_color_lut *table; + } lut; +}; + +#if IS_ENABLED(CONFIG_DRM_RCAR_CMM) +int rcar_cmm_init(struct platform_device *pdev); + +int rcar_cmm_enable(struct platform_device *pdev); +void rcar_cmm_disable(struct platform_device *pdev); + +int rcar_cmm_setup(struct platform_device *pdev, + const struct rcar_cmm_config *config); +#else +static inline int rcar_cmm_init(struct platform_device *pdev) +{ + return -ENODEV; +} + +static inline int rcar_cmm_enable(struct platform_device *pdev) +{ + return 0; +} + +static inline void rcar_cmm_disable(struct platform_device *pdev) +{ +} + +static inline int rcar_cmm_setup(struct platform_device *pdev, + const struct rcar_cmm_config *config) +{ + return 0; +} +#endif /* IS_ENABLED(CONFIG_DRM_RCAR_CMM) */ + +#endif /* __RCAR_CMM_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index f2ae4c410244..d73e88ddecd0 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -22,6 +22,7 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> +#include "rcar_cmm.h" #include "rcar_du_crtc.h" #include "rcar_du_drv.h" #include "rcar_du_encoder.h" @@ -476,6 +477,45 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc) } /* ----------------------------------------------------------------------------- + * Color Management Module (CMM) + */ + +static int rcar_du_cmm_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct drm_property_blob *drm_lut = state->gamma_lut; + struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct device *dev = rcrtc->dev->dev; + + if (!drm_lut) + return 0; + + /* We only accept fully populated LUT tables. */ + if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) { + dev_err(dev, "invalid gamma lut size: %zu bytes\n", + drm_lut->length); + return -EINVAL; + } + + return 0; +} + +static void rcar_du_cmm_setup(struct drm_crtc *crtc) +{ + struct drm_property_blob *drm_lut = crtc->state->gamma_lut; + struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct rcar_cmm_config cmm_config = {}; + + if (!rcrtc->cmm) + return; + + if (drm_lut) + cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data; + + rcar_cmm_setup(rcrtc->cmm, &cmm_config); +} + +/* ----------------------------------------------------------------------------- * Start/Stop and Suspend/Resume */ @@ -620,6 +660,9 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_disable(rcrtc); + if (rcrtc->cmm) + rcar_cmm_disable(rcrtc->cmm); + /* * Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. @@ -643,6 +686,11 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, { struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state); struct drm_encoder *encoder; + int ret; + + ret = rcar_du_cmm_check(crtc, state); + if (ret) + return ret; /* Store the routes from the CRTC output to the DU outputs. 
*/ rstate->outputs = 0; @@ -668,6 +716,8 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state); struct rcar_du_device *rcdu = rcrtc->dev; + if (rcrtc->cmm) + rcar_cmm_enable(rcrtc->cmm); rcar_du_crtc_get(rcrtc); /* @@ -688,6 +738,13 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, } rcar_du_crtc_start(rcrtc); + + /* + * TODO: The chip manual indicates that CMM tables should be written + * after the DU channel has been activated. Investigate the impact + * of this restriction on the first displayed frame. + */ + rcar_du_cmm_setup(crtc); } static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, @@ -743,6 +800,10 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, */ rcar_du_crtc_get(rcrtc); + /* If the active state changed, we let .atomic_enable handle CMM. */ + if (crtc->state->color_mgmt_changed && !crtc->state->active_changed) + rcar_du_cmm_setup(crtc); + if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_atomic_begin(rcrtc); } @@ -1079,6 +1140,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = { .set_crc_source = rcar_du_crtc_set_crc_source, .verify_crc_source = rcar_du_crtc_verify_crc_source, .get_crc_sources = rcar_du_crtc_get_crc_sources, + .gamma_set = drm_atomic_helper_legacy_gamma_set, }; /* ----------------------------------------------------------------------------- @@ -1198,6 +1260,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, if (ret < 0) return ret; + /* CMM might be disabled for this CRTC. */ + if (rcdu->cmms[swindex]) { + rcrtc->cmm = rcdu->cmms[swindex]; + rgrp->cmms_mask |= BIT(hwindex % 2); + + drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE); + drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE); + } + drm_crtc_helper_add(crtc, &crtc_helper_funcs); /* Start with vertical blanking interrupt reporting disabled. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 3b7fc668996f..5f2940c42225 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -39,6 +39,7 @@ struct rcar_du_vsp; * @vblank_wait: wait queue used to signal vertical blanking * @vblank_count: number of vertical blanking interrupts to wait for * @group: CRTC group this CRTC belongs to + * @cmm: CMM associated with this CRTC * @vsp: VSP feeding video to this CRTC * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC * @writeback: the writeback connector @@ -64,6 +65,7 @@ struct rcar_du_crtc { unsigned int vblank_count; struct rcar_du_group *group; + struct platform_device *cmm; struct rcar_du_vsp *vsp; unsigned int vsp_pipe; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index f266c17b907a..654e2dd08146 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -399,7 +399,10 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = { | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(0), .routes = { - /* R8A77970 has one RGB output and one LVDS output. */ + /* + * R8A77970 and R8A77980 have one RGB output and one LVDS + * output. 
+ */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(0), .port = 0, @@ -457,6 +460,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info }, { .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info }, { .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info }, + { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info }, { .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info }, { .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info }, { } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 1327cd0df90a..61504c54e2ec 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/wait.h> +#include "rcar_cmm.h" #include "rcar_du_crtc.h" #include "rcar_du_group.h" #include "rcar_du_vsp.h" @@ -85,6 +86,7 @@ struct rcar_du_device { struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX]; struct rcar_du_group groups[RCAR_DU_MAX_GROUPS]; + struct platform_device *cmms[RCAR_DU_MAX_CRTCS]; struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS]; struct { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 9eee47969e77..88a783ceb3e9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -135,6 +135,7 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp) static void rcar_du_group_setup(struct rcar_du_group *rgrp) { struct rcar_du_device *rcdu = rgrp->dev; + u32 defr7 = DEFR7_CODE; /* Enable extended features */ rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE); @@ -147,6 +148,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) rcar_du_group_setup_pins(rgrp); + /* + * TODO: Handle routing of the DU output to CMM dynamically, as we + * should bypass CMM completely when no color management feature is + * used. + */ + defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) | + (rgrp->cmms_mask & BIT(0) ? 
DEFR7_CMME0 : 0); + rcar_du_group_write(rgrp, DEFR7, defr7); + if (rcdu->info->gen >= 2) { rcar_du_group_setup_defr8(rgrp); rcar_du_group_setup_didsr(rgrp); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index 87950c1f6a52..e9906609c635 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -22,6 +22,7 @@ struct rcar_du_device; * @mmio_offset: registers offset in the device memory map * @index: group index * @channels_mask: bitmask of populated DU channels in this group + * @cmms_mask: bitmask of available CMMs in this group * @num_crtcs: number of CRTCs in this group (1 or 2) * @use_count: number of users of the group (rcar_du_group_(get|put)) * @used_crtcs: number of CRTCs currently in use @@ -37,6 +38,7 @@ struct rcar_du_group { unsigned int index; unsigned int channels_mask; + unsigned int cmms_mask; unsigned int num_crtcs; unsigned int use_count; unsigned int used_crtcs; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 0d59f390de19..fcfd916227d1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -17,7 +17,9 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> +#include <linux/device.h> #include <linux/of_graph.h> +#include <linux/of_platform.h> #include <linux/wait.h> #include "rcar_du_crtc.h" @@ -542,6 +544,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu) static int rcar_du_vsps_init(struct rcar_du_device *rcdu) { const struct device_node *np = rcdu->dev->of_node; + const char *vsps_prop_name = "renesas,vsps"; struct of_phandle_args args; struct { struct device_node *np; @@ -557,15 +560,21 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu) * entry contains a pointer to the VSP DT node and a bitmask of the * connected DU CRTCs. */ - cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1; + ret = of_property_count_u32_elems(np, vsps_prop_name); + if (ret < 0) { + /* Backward compatibility with old DTBs. */ + vsps_prop_name = "vsps"; + ret = of_property_count_u32_elems(np, vsps_prop_name); + } + cells = ret / rcdu->num_crtcs - 1; if (cells > 1) return -EINVAL; for (i = 0; i < rcdu->num_crtcs; ++i) { unsigned int j; - ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i, - &args); + ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name, + cells, i, &args); if (ret < 0) goto error; @@ -587,8 +596,8 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu) /* * Store the VSP pointer and pipe index in the CRTC. If the - * second cell of the 'vsps' specifier isn't present, default - * to 0 to remain compatible with older DT bindings. + * second cell of the 'renesas,vsps' specifier isn't present, + * default to 0 to remain compatible with older DT bindings. */ rcdu->crtcs[i].vsp = &rcdu->vsps[j]; rcdu->crtcs[i].vsp_pipe = cells >= 1 ? 
args.args[0] : 0; @@ -618,6 +627,75 @@ error: return ret; } +static int rcar_du_cmm_init(struct rcar_du_device *rcdu) +{ + const struct device_node *np = rcdu->dev->of_node; + unsigned int i; + int cells; + + cells = of_property_count_u32_elems(np, "renesas,cmms"); + if (cells == -EINVAL) + return 0; + + if (cells > rcdu->num_crtcs) { + dev_err(rcdu->dev, + "Invalid number of entries in 'renesas,cmms'\n"); + return -EINVAL; + } + + for (i = 0; i < cells; ++i) { + struct platform_device *pdev; + struct device_link *link; + struct device_node *cmm; + int ret; + + cmm = of_parse_phandle(np, "renesas,cmms", i); + if (IS_ERR(cmm)) { + dev_err(rcdu->dev, + "Failed to parse 'renesas,cmms' property\n"); + return PTR_ERR(cmm); + } + + if (!of_device_is_available(cmm)) { + /* It's fine to have a phandle to a non-enabled CMM. */ + of_node_put(cmm); + continue; + } + + pdev = of_find_device_by_node(cmm); + if (IS_ERR(pdev)) { + dev_err(rcdu->dev, "No device found for CMM%u\n", i); + of_node_put(cmm); + return PTR_ERR(pdev); + } + + of_node_put(cmm); + + /* + * -ENODEV is used to report that the CMM config option is + * disabled: return 0 and let the DU continue probing. + */ + ret = rcar_cmm_init(pdev); + if (ret) + return ret == -ENODEV ? 0 : ret; + + /* + * Enforce suspend/resume ordering by making the CMM a provider + * of the DU: CMM is suspended after and resumed before the DU. + */ + link = device_link_add(rcdu->dev, &pdev->dev, DL_FLAG_STATELESS); + if (!link) { + dev_err(rcdu->dev, + "Failed to create device link to CMM%u\n", i); + return -EINVAL; + } + + rcdu->cmms[i] = pdev; + } + + return 0; +} + int rcar_du_modeset_init(struct rcar_du_device *rcdu) { static const unsigned int mmio_offsets[] = { @@ -708,6 +786,11 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) return ret; } + /* Initialize the Color Management Modules. */ + ret = rcar_du_cmm_init(rcdu); + if (ret) + return ret; + /* Create the CRTCs. 
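 * CMM instances located by rcar_du_cmm_init() above are picked up per-CRTC
 * in rcar_du_crtc_create(), which also registers the gamma LUT properties
 * when a CMM is present.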
*/ for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) { struct rcar_du_group *rgrp; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index bc87f080b170..fb9964949368 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -197,6 +197,11 @@ #define DEFR6_MLOS1 (1 << 2) #define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1) +#define DEFR7 0x000ec +#define DEFR7_CODE (0x7779 << 16) +#define DEFR7_CMME1 BIT(6) +#define DEFR7_CMME0 BIT(4) + /* ----------------------------------------------------------------------------- * R8A7790-only Control Registers */ diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 2cf44b91853c..8ffa4fbbdeb3 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -21,6 +21,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> @@ -36,6 +37,12 @@ enum rcar_lvds_mode { RCAR_LVDS_MODE_VESA = 4, }; +enum rcar_lvds_link_type { + RCAR_LVDS_SINGLE_LINK = 0, + RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 1, + RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 2, +}; + #define RCAR_LVDS_QUIRK_LANES BIT(0) /* LVDS lanes 1 and 3 inverted */ #define RCAR_LVDS_QUIRK_GEN3_LVEN BIT(1) /* LVEN bit needs to be set on R8A77970/R8A7799x */ #define RCAR_LVDS_QUIRK_PWD BIT(2) /* PWD bit available (all of Gen3 but E3) */ @@ -65,11 +72,8 @@ struct rcar_lvds { struct clk *dotclkin[2]; /* External DU clocks */ } clocks; - struct drm_display_mode display_mode; - enum rcar_lvds_mode mode; - struct drm_bridge *companion; - bool dual_link; + enum rcar_lvds_link_type link_type; }; #define bridge_to_rcar_lvds(b) \ @@ -402,10 +406,53 @@ EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable); * Bridge */ -static void rcar_lvds_enable(struct drm_bridge *bridge) +static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds, + const struct drm_connector *connector) +{ + const struct drm_display_info *info; + enum rcar_lvds_mode mode; + + /* + * There is no API yet to retrieve LVDS mode from a bridge, only panels + * are supported. + */ + if (!lvds->panel) + return RCAR_LVDS_MODE_JEIDA; + + info = &connector->display_info; + if (!info->num_bus_formats || !info->bus_formats) { + dev_warn(lvds->dev, + "no LVDS bus format reported, using JEIDA\n"); + return RCAR_LVDS_MODE_JEIDA; + } + + switch (info->bus_formats[0]) { + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + mode = RCAR_LVDS_MODE_JEIDA; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + mode = RCAR_LVDS_MODE_VESA; + break; + default: + dev_warn(lvds->dev, + "unsupported LVDS bus format 0x%04x, using JEIDA\n", + info->bus_formats[0]); + return RCAR_LVDS_MODE_JEIDA; + } + + if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB) + mode |= RCAR_LVDS_MODE_MIRROR; + + return mode; +} + +static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_connector *connector) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - const struct drm_display_mode *mode = &lvds->display_mode; u32 lvdhcr; u32 lvdcr0; int ret; @@ -415,8 +462,9 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) return; /* Enable the companion LVDS encoder in dual-link mode. 
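 * The companion is driven through this same function; it skips the PLL
 * setup below, which only runs on the primary encoder in dual-link mode.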
*/ - if (lvds->dual_link && lvds->companion) - lvds->companion->funcs->enable(lvds->companion); + if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion) + __rcar_lvds_atomic_enable(lvds->companion, state, crtc, + connector); /* * Hardcode the channels and control signals routing for now. @@ -440,30 +488,51 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) rcar_lvds_write(lvds, LVDCHCR, lvdhcr); if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) { - /* - * Configure vertical stripe based on the mode of operation of - * the connected device. - */ - rcar_lvds_write(lvds, LVDSTRIPE, - lvds->dual_link ? LVDSTRIPE_ST_ON : 0); + u32 lvdstripe = 0; + + if (lvds->link_type != RCAR_LVDS_SINGLE_LINK) { + /* + * By default we generate even pixels from the primary + * encoder and odd pixels from the companion encoder. + * Swap pixels around if the sink requires odd pixels + * from the primary encoder and even pixels from the + * companion encoder. + */ + bool swap_pixels = lvds->link_type == + RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; + + /* + * Configure vertical stripe since we are dealing with + * an LVDS dual-link connection. + * + * ST_SWAP is reserved for the companion encoder, only + * set it in the primary encoder. + */ + lvdstripe = LVDSTRIPE_ST_ON + | (lvds->companion && swap_pixels ? + LVDSTRIPE_ST_SWAP : 0); + } + rcar_lvds_write(lvds, LVDSTRIPE, lvdstripe); } /* * PLL clock configuration on all instances but the companion in * dual-link mode. */ - if (!lvds->dual_link || lvds->companion) + if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) { + const struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + const struct drm_display_mode *mode = + &crtc_state->adjusted_mode; + lvds->info->pll_setup(lvds, mode->clock * 1000); + } /* Set the LVDS mode and select the input. */ - lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; + lvdcr0 = rcar_lvds_get_lvds_mode(lvds, connector) << LVDCR0_LVMD_SHIFT; if (lvds->bridge.encoder) { - /* - * FIXME: We should really retrieve the CRTC through the state, - * but how do we get a state pointer? - */ - if (drm_crtc_index(lvds->bridge.encoder->crtc) == 2) + if (drm_crtc_index(crtc) == 2) lvdcr0 |= LVDCR0_DUSEL; } @@ -520,7 +589,21 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) } } -static void rcar_lvds_disable(struct drm_bridge *bridge) +static void rcar_lvds_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_connector *connector; + struct drm_crtc *crtc; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + + __rcar_lvds_atomic_enable(bridge, state, crtc, connector); +} + +static void rcar_lvds_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); @@ -534,8 +617,8 @@ static void rcar_lvds_disable(struct drm_bridge *bridge) rcar_lvds_write(lvds, LVDPLLCR, 0); /* Disable the companion LVDS encoder in dual-link mode. 
*/ - if (lvds->dual_link && lvds->companion) - lvds->companion->funcs->disable(lvds->companion); + if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion) + lvds->companion->funcs->atomic_disable(lvds->companion, state); clk_disable_unprepare(lvds->clocks.mod); } @@ -558,54 +641,6 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge, return true; } -static void rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds) -{ - struct drm_display_info *info = &lvds->connector.display_info; - enum rcar_lvds_mode mode; - - /* - * There is no API yet to retrieve LVDS mode from a bridge, only panels - * are supported. - */ - if (!lvds->panel) - return; - - if (!info->num_bus_formats || !info->bus_formats) { - dev_err(lvds->dev, "no LVDS bus format reported\n"); - return; - } - - switch (info->bus_formats[0]) { - case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: - case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: - mode = RCAR_LVDS_MODE_JEIDA; - break; - case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: - mode = RCAR_LVDS_MODE_VESA; - break; - default: - dev_err(lvds->dev, "unsupported LVDS bus format 0x%04x\n", - info->bus_formats[0]); - return; - } - - if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB) - mode |= RCAR_LVDS_MODE_MIRROR; - - lvds->mode = mode; -} - -static void rcar_lvds_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - - lvds->display_mode = *adjusted_mode; - - rcar_lvds_get_lvds_mode(lvds); -} - static int rcar_lvds_attach(struct drm_bridge *bridge) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); @@ -647,17 +682,16 @@ static void rcar_lvds_detach(struct drm_bridge *bridge) static const struct drm_bridge_funcs rcar_lvds_bridge_ops = { .attach = rcar_lvds_attach, .detach = rcar_lvds_detach, - .enable = rcar_lvds_enable, - .disable = rcar_lvds_disable, + .atomic_enable = rcar_lvds_atomic_enable, + .atomic_disable = rcar_lvds_atomic_disable, .mode_fixup = rcar_lvds_mode_fixup, - .mode_set = rcar_lvds_mode_set, }; bool rcar_lvds_dual_link(struct drm_bridge *bridge) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - return lvds->dual_link; + return lvds->link_type != RCAR_LVDS_SINGLE_LINK; } EXPORT_SYMBOL_GPL(rcar_lvds_dual_link); @@ -669,7 +703,10 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds) { const struct of_device_id *match; struct device_node *companion; + struct device_node *port0, *port1; + struct rcar_lvds *companion_lvds; struct device *dev = lvds->dev; + int dual_link; int ret = 0; /* Locate the companion LVDS encoder for dual-link operation, if any. */ @@ -688,13 +725,68 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds) goto done; } + /* + * We need to work out if the sink is expecting us to function in + * dual-link mode. We do this by looking at the DT port nodes we are + * connected to, if they are marked as expecting even pixels and + * odd pixels than we need to enable vertical stripe output. 
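+ * For example, when the sink port facing this encoder is marked with
+ * dual-lvds-odd-pixels and the one facing the companion with
+ * dual-lvds-even-pixels (see the drm_of_lvds_get_dual_link_pixel_order()
+ * documentation for the exact DT expectations), the helper is expected to
+ * return DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS and pixel swapping gets
+ * enabled on the primary encoder.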
+ */ + port0 = of_graph_get_port_by_id(dev->of_node, 1); + port1 = of_graph_get_port_by_id(companion, 1); + dual_link = drm_of_lvds_get_dual_link_pixel_order(port0, port1); + of_node_put(port0); + of_node_put(port1); + + switch (dual_link) { + case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: + lvds->link_type = RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; + break; + case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: + lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS; + break; + default: + /* + * Early dual-link bridge specific implementations populate the + * timings field of drm_bridge. If the flag is set, we assume + * that we are expected to generate even pixels from the primary + * encoder, and odd pixels from the companion encoder. + */ + if (lvds->next_bridge && lvds->next_bridge->timings && + lvds->next_bridge->timings->dual_link) + lvds->link_type = RCAR_LVDS_DUAL_LINK_EVEN_ODD_PIXELS; + else + lvds->link_type = RCAR_LVDS_SINGLE_LINK; + } + + if (lvds->link_type == RCAR_LVDS_SINGLE_LINK) { + dev_dbg(dev, "Single-link configuration detected\n"); + goto done; + } + lvds->companion = of_drm_find_bridge(companion); if (!lvds->companion) { ret = -EPROBE_DEFER; goto done; } - dev_dbg(dev, "Found companion encoder %pOF\n", companion); + dev_dbg(dev, + "Dual-link configuration detected (companion encoder %pOF)\n", + companion); + + if (lvds->link_type == RCAR_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) + dev_dbg(dev, "Data swapping required\n"); + + /* + * FIXME: We should not be messing with the companion encoder private + * data from the primary encoder, we should rather let the companion + * encoder work things out on its own. However, the companion encoder + * doesn't hold a reference to the primary encoder, and + * drm_of_lvds_get_dual_link_pixel_order needs to be given references + * to the output ports of both encoders, therefore leave it like this + * for the time being. + */ + companion_lvds = bridge_to_rcar_lvds(lvds->companion); + companion_lvds->link_type = lvds->link_type; done: of_node_put(companion); @@ -704,79 +796,17 @@ done: static int rcar_lvds_parse_dt(struct rcar_lvds *lvds) { - struct device_node *local_output = NULL; - struct device_node *remote_input = NULL; - struct device_node *remote = NULL; - struct device_node *node; - bool is_bridge = false; - int ret = 0; - - local_output = of_graph_get_endpoint_by_regs(lvds->dev->of_node, 1, 0); - if (!local_output) { - dev_dbg(lvds->dev, "unconnected port@1\n"); - ret = -ENODEV; - goto done; - } - - /* - * Locate the connected entity and infer its type from the number of - * endpoints. - */ - remote = of_graph_get_remote_port_parent(local_output); - if (!remote) { - dev_dbg(lvds->dev, "unconnected endpoint %pOF\n", local_output); - ret = -ENODEV; - goto done; - } + int ret; - if (!of_device_is_available(remote)) { - dev_dbg(lvds->dev, "connected entity %pOF is disabled\n", - remote); - ret = -ENODEV; + ret = drm_of_find_panel_or_bridge(lvds->dev->of_node, 1, 0, + &lvds->panel, &lvds->next_bridge); + if (ret) goto done; - } - - remote_input = of_graph_get_remote_endpoint(local_output); - for_each_endpoint_of_node(remote, node) { - if (node != remote_input) { - /* - * We've found one endpoint other than the input, this - * must be a bridge. - */ - is_bridge = true; - of_node_put(node); - break; - } - } - - if (is_bridge) { - lvds->next_bridge = of_drm_find_bridge(remote); - if (!lvds->next_bridge) { - ret = -EPROBE_DEFER; - goto done; - } - - if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) - lvds->dual_link = lvds->next_bridge->timings - ? 
lvds->next_bridge->timings->dual_link - : false; - } else { - lvds->panel = of_drm_find_panel(remote); - if (IS_ERR(lvds->panel)) { - ret = PTR_ERR(lvds->panel); - goto done; - } - } - - if (lvds->dual_link) + if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) ret = rcar_lvds_parse_dt_companion(lvds); done: - of_node_put(local_output); - of_node_put(remote_input); - of_node_put(remote); - /* * On D3/E3 the LVDS encoder provides a clock to the DU, which can be * used for the DPAD output even when the LVDS output is not connected. diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 1a5153197fe9..461a7a8129f4 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -23,6 +23,7 @@ #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/completion.h> #include <drm/drm_print.h> #include <drm/gpu_scheduler.h> @@ -68,6 +69,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, if (!entity->rq_list) return -ENOMEM; + init_completion(&entity->entity_idle); + for (i = 0; i < num_rq_list; ++i) entity->rq_list[i] = rq_list[i]; @@ -286,11 +289,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) */ if (spsc_queue_count(&entity->job_queue)) { if (sched) { - /* Park the kernel for a moment to make sure it isn't processing - * our enity. + /* + * Wait for thread to idle to make sure it isn't processing + * this entity. */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); + wait_for_completion(&entity->entity_idle); + } if (entity->dependency) { dma_fence_remove_callback(entity->dependency, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 40f396b00797..3fad5876a13f 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -47,6 +47,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/sched.h> +#include <linux/completion.h> #include <uapi/linux/sched/types.h> #include <drm/drm_print.h> @@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) list_for_each_entry_continue(entity, &rq->entities, list) { if (drm_sched_entity_is_ready(entity)) { rq->current_entity = entity; + reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); return entity; } @@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) if (drm_sched_entity_is_ready(entity)) { rq->current_entity = entity; + reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); return entity; } @@ -506,6 +509,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) struct drm_sched_job *s_job, *tmp; uint64_t guilty_context; bool found_guilty = false; + struct dma_fence *fence; list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { struct drm_sched_fence *s_fence = s_job->s_fence; @@ -519,7 +523,18 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) dma_fence_set_error(&s_fence->finished, -ECANCELED); dma_fence_put(s_job->s_fence->parent); - s_job->s_fence->parent = sched->ops->run_job(s_job); + fence = sched->ops->run_job(s_job); + + if (IS_ERR_OR_NULL(fence)) { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + + s_job->s_fence->parent = NULL; + } else { + s_job->s_fence->parent = fence; + } + + } } EXPORT_SYMBOL(drm_sched_resubmit_jobs); @@ -662,9 +677,13 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) struct drm_sched_job *job; unsigned long flags; - /* Don't destroy jobs 
while the timeout worker is running */ - if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - !cancel_delayed_work(&sched->work_tdr)) + /* + * Don't destroy jobs while the timeout worker is running OR thread + * is being parked and hence assumed to not touch ring_mirror_list + */ + if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && + !cancel_delayed_work(&sched->work_tdr)) || + __kthread_should_park(sched->thread)) return NULL; spin_lock_irqsave(&sched->job_list_lock, flags); @@ -741,6 +760,9 @@ static int drm_sched_main(void *param) continue; sched_job = drm_sched_entity_pop_job(entity); + + complete(&entity->entity_idle); + if (!sched_job) continue; @@ -752,7 +774,7 @@ static int drm_sched_main(void *param) fence = sched->ops->run_job(sched_job); drm_sched_fence_scheduled(s_fence); - if (fence) { + if (!IS_ERR_OR_NULL(fence)) { s_fence->parent = dma_fence_get(fence); r = dma_fence_add_callback(fence, &sched_job->cb, drm_sched_process_job); @@ -762,8 +784,12 @@ static int drm_sched_main(void *param) DRM_ERROR("fence add callback failed (%d)\n", r); dma_fence_put(fence); - } else + } else { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + drm_sched_process_job(NULL, &sched_job->cb); + } wake_up(&sched->job_scheduled); } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index fad72799b8df..42651d737c55 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -489,7 +489,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, WARN_ON(!tcon->quirks->has_channel_0); - tcon->dclk_min_div = 6; + tcon->dclk_min_div = 1; tcon->dclk_max_div = 127; sun4i_tcon0_mode_set_common(tcon, mode); diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 1d1269fde3c1..5043dcaf1cf9 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -9,7 +9,7 @@ config DRM_TEGRA select DRM_MIPI_DSI select DRM_PANEL select TEGRA_HOST1X - select IOMMU_IOVA if IOMMU_SUPPORT + select IOMMU_IOVA select CEC_CORE if CEC_NOTIFIER help Choose this option if you have an NVIDIA Tegra SoC. diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index fbf57bc3cdab..714af052fbef 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, window.swap = state->swap; for (i = 0; i < fb->format->num_planes; i++) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, i); - - window.base[i] = bo->paddr + fb->offsets[i]; + window.base[i] = state->iova[i] + fb->offsets[i]; /* * Tegra uses a shared stride for UV planes. 
Framebuffers are @@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_plane_atomic_check, .atomic_disable = tegra_plane_atomic_disable, .atomic_update = tegra_plane_atomic_update, @@ -837,16 +837,15 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane, static void tegra_cursor_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0); + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); - struct drm_plane_state *state = plane->state; u32 value = CURSOR_CLIP_DISPLAY; /* rien ne va plus */ if (!plane->state->crtc || !plane->state->fb) return; - switch (state->crtc_w) { + switch (plane->state->crtc_w) { case 32: value |= CURSOR_SIZE_32x32; break; @@ -864,16 +863,16 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, break; default: - WARN(1, "cursor size %ux%u not supported\n", state->crtc_w, - state->crtc_h); + WARN(1, "cursor size %ux%u not supported\n", + plane->state->crtc_w, plane->state->crtc_h); return; } - value |= (bo->paddr >> 10) & 0x3fffff; + value |= (state->iova[0] >> 10) & 0x3fffff; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - value = (bo->paddr >> 32) & 0x3; + value = (state->iova[0] >> 32) & 0x3; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); #endif @@ -892,7 +891,8 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); /* position the cursor */ - value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff); + value = (plane->state->crtc_y & 0x3fff) << 16 | + (plane->state->crtc_x & 0x3fff); tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); } @@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_cursor_atomic_check, .atomic_update = tegra_cursor_atomic_update, .atomic_disable = tegra_cursor_atomic_disable, @@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client) if (!dc->syncpt) dev_warn(dc->dev, "failed to allocate syncpoint\n"); - dc->group = host1x_client_iommu_attach(client, true); - if (IS_ERR(dc->group)) { - err = PTR_ERR(dc->group); + err = host1x_client_iommu_attach(client); + if (err < 0 && err != -ENODEV) { dev_err(client->dev, "failed to attach to domain: %d\n", err); return err; } @@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client) goto cleanup; } + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent device. 
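+ * Without this, dev->dma_parms stays NULL and the DMA mapping core falls
+ * back to its small default maximum segment size, needlessly fragmenting
+ * scatterlists for large framebuffer imports.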
+ */ + client->dev->dma_parms = client->parent->dma_parms; + return 0; cleanup: @@ -2083,7 +2090,7 @@ cleanup: if (!IS_ERR(primary)) drm_plane_cleanup(primary); - host1x_client_iommu_detach(client, dc->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(dc->syncpt); return err; @@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client) if (!tegra_dc_has_window_groups(dc)) return 0; + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + devm_free_irq(dc->dev, dc->irq, dc); err = tegra_dc_rgb_exit(dc); @@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client) return err; } - host1x_client_iommu_detach(client, dc->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(dc->syncpt); return 0; diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 0c4d17851f47..3d8ddccd758f 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -90,8 +90,6 @@ struct tegra_dc { struct drm_info_list *debugfs_files; const struct tegra_dc_soc_info *soc; - - struct iommu_group *group; }; static inline struct tegra_dc * diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c index 50ba967ebcbd..70dfb7d1dec5 100644 --- a/drivers/gpu/drm/tegra/dp.c +++ b/drivers/gpu/drm/tegra/dp.c @@ -4,10 +4,158 @@ * Copyright (C) 2015 Rob Clark */ +#include <drm/drm_crtc.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> #include "dp.h" +static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 }; + +static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps) +{ + caps->enhanced_framing = false; + caps->tps3_supported = false; + caps->fast_training = false; + caps->channel_coding = false; + caps->alternate_scrambler_reset = false; +} + +void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest, + const struct drm_dp_link_caps *src) +{ + dest->enhanced_framing = src->enhanced_framing; + dest->tps3_supported = src->tps3_supported; + dest->fast_training = src->fast_training; + dest->channel_coding = src->channel_coding; + dest->alternate_scrambler_reset = src->alternate_scrambler_reset; +} + +static void drm_dp_link_reset(struct drm_dp_link *link) +{ + unsigned int i; + + if (!link) + return; + + link->revision = 0; + link->max_rate = 0; + link->max_lanes = 0; + + drm_dp_link_caps_reset(&link->caps); + link->aux_rd_interval.cr = 0; + link->aux_rd_interval.ce = 0; + link->edp = 0; + + link->rate = 0; + link->lanes = 0; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) + link->rates[i] = 0; + + link->num_rates = 0; +} + +/** + * drm_dp_link_add_rate() - add a rate to the list of supported rates + * @link: the link to add the rate to + * @rate: the rate to add + * + * Add a link rate to the list of supported link rates. 
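+ *
+ * A minimal usage sketch (rates are given in kHz, like the link rates used
+ * elsewhere in these helpers; error handling omitted)::
+ *
+ *	drm_dp_link_add_rate(link, 162000);
+ *	drm_dp_link_add_rate(link, 270000);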
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXIST if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+	unsigned int i, pivot;
+
+	if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+		return -ENOSPC;
+
+	for (pivot = 0; pivot < link->num_rates; pivot++)
+		if (rate <= link->rates[pivot])
+			break;
+
+	if (pivot != link->num_rates && rate == link->rates[pivot])
+		return -EEXIST;
+
+	for (i = link->num_rates; i > pivot; i--)
+		link->rates[i] = link->rates[i - 1];
+
+	link->rates[pivot] = rate;
+	link->num_rates++;
+
+	return 0;
+}
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+	unsigned int i;
+
+	for (i = 0; i < link->num_rates; i++)
+		if (rate == link->rates[i])
+			break;
+
+	if (i == link->num_rates)
+		return -EINVAL;
+
+	link->num_rates--;
+
+	while (i < link->num_rates) {
+		link->rates[i] = link->rates[i + 1];
+		i++;
+	}
+
+	return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
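+ *
+ * A minimal sketch of the intended pattern, dropping one entry by hand and
+ * then normalizing the array (index 1 is just an example)::
+ *
+ *	link->rates[1] = 0;
+ *	drm_dp_link_update_rates(link);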
+ * + * See also: + * drm_dp_link_add_rate(), drm_dp_link_remove_rate() + */ +void drm_dp_link_update_rates(struct drm_dp_link *link) +{ + unsigned int i, count = 0; + + for (i = 0; i < link->num_rates; i++) { + if (link->rates[i] != 0) + link->rates[count++] = link->rates[i]; + } + + for (i = count; i < link->num_rates; i++) + link->rates[i] = 0; + + link->num_rates = count; +} + /** * drm_dp_link_probe() - probe a DisplayPort link for capabilities * @aux: DisplayPort AUX channel @@ -21,21 +169,88 @@ */ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) { - u8 values[3]; + u8 dpcd[DP_RECEIVER_CAP_SIZE], value; + unsigned int rd_interval; int err; - memset(link, 0, sizeof(*link)); + drm_dp_link_reset(link); - err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd)); if (err < 0) return err; - link->revision = values[0]; - link->rate = drm_dp_bw_code_to_link_rate(values[1]); - link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + link->revision = dpcd[DP_DPCD_REV]; + link->max_rate = drm_dp_max_link_rate(dpcd); + link->max_lanes = drm_dp_max_lane_count(dpcd); + + link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd); + link->caps.tps3_supported = drm_dp_tps3_supported(dpcd); + link->caps.fast_training = drm_dp_fast_training_cap(dpcd); + link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd); + + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) { + link->caps.alternate_scrambler_reset = true; + + err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value); + if (err < 0) + return err; + + if (value >= ARRAY_SIZE(drm_dp_edp_revisions)) + DRM_ERROR("unsupported eDP version: %02x\n", value); + else + link->edp = drm_dp_edp_revisions[value]; + } + + /* + * The DPCD stores the AUX read interval in units of 4 ms. There are + * two special cases: + * + * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery + * and channel equalization should use 100 us or 400 us AUX read + * intervals, respectively + * + * 2) for DP v1.4 and above, clock recovery should always use 100 us + * AUX read intervals + */ + rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) { + DRM_DEBUG_KMS("AUX interval %u out of range (max. 
4)\n", + rd_interval); + rd_interval = 4; + } + + rd_interval *= 4 * USEC_PER_MSEC; - if (values[2] & DP_ENHANCED_FRAME_CAP) - link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14) + link->aux_rd_interval.cr = 100; + + if (rd_interval == 0) + link->aux_rd_interval.ce = 400; + + link->rate = link->max_rate; + link->lanes = link->max_lanes; + + /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */ + if (link->edp >= 0x14) { + u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2]; + unsigned int i; + u16 rate; + + err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, + supported_rates, + sizeof(supported_rates)); + if (err < 0) + return err; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) { + rate = supported_rates[i * 2 + 1] << 8 | + supported_rates[i * 2 + 0]; + + drm_dp_link_add_rate(link, rate * 200); + } + } return 0; } @@ -116,18 +331,546 @@ int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) */ int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) { - u8 values[2]; + u8 values[2], value; int err; + if (link->ops && link->ops->configure) { + err = link->ops->configure(link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + } + values[0] = drm_dp_link_rate_to_bw_code(link->rate); - values[1] = link->num_lanes; + values[1] = link->lanes; - if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (link->caps.enhanced_framing) values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); if (err < 0) return err; + if (link->caps.channel_coding) + value = DP_SET_ANSI_8B10B; + else + value = 0; + + err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value); + if (err < 0) + return err; + + if (link->caps.alternate_scrambler_reset) { + err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, + DP_ALTERNATE_SCRAMBLER_RESET_ENABLE); + if (err < 0) + return err; + } + + return 0; +} + +/** + * drm_dp_link_choose() - choose the lowest possible configuration for a mode + * @link: DRM DP link object + * @mode: DRM display mode + * @info: DRM display information + * + * According to the eDP specification, a source should select a configuration + * with the lowest number of lanes and the lowest possible link rate that can + * match the bitrate requirements of a video mode. However it must ensure not + * to exceed the capabilities of the sink. + * + * Returns: 0 on success or a negative error code on failure. + */ +int drm_dp_link_choose(struct drm_dp_link *link, + const struct drm_display_mode *mode, + const struct drm_display_info *info) +{ + /* available link symbol clock rates */ + static const unsigned int rates[3] = { 162000, 270000, 540000 }; + /* available number of lanes */ + static const unsigned int lanes[3] = { 1, 2, 4 }; + unsigned long requirement, capacity; + unsigned int rate = link->max_rate; + unsigned int i, j; + + /* bandwidth requirement */ + requirement = mode->clock * info->bpc * 3; + + for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) { + for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) { + /* + * Capacity for this combination of lanes and rate, + * factoring in the ANSI 8B/10B encoding. + * + * Link rates in the DRM DP helpers are really link + * symbol frequencies, so a tenth of the actual rate + * of the link. 
+ */ + capacity = lanes[i] * (rates[j] * 10) * 8 / 10; + + if (capacity >= requirement) { + DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n", + lanes[i], rates[j], requirement, + capacity); + link->lanes = lanes[i]; + link->rate = rates[j]; + return 0; + } + } + } + + return -ERANGE; +} + +/** + * DOC: Link training + * + * These functions contain common logic and helpers to implement DisplayPort + * link training. + */ + +/** + * drm_dp_link_train_init() - initialize DisplayPort link training state + * @train: DisplayPort link training state + */ +void drm_dp_link_train_init(struct drm_dp_link_train *train) +{ + struct drm_dp_link_train_set *request = &train->request; + struct drm_dp_link_train_set *adjust = &train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) { + request->voltage_swing[i] = 0; + adjust->voltage_swing[i] = 0; + + request->pre_emphasis[i] = 0; + adjust->pre_emphasis[i] = 0; + + request->post_cursor[i] = 0; + adjust->post_cursor[i] = 0; + } + + train->pattern = DP_TRAINING_PATTERN_DISABLE; + train->clock_recovered = false; + train->channel_equalized = false; +} + +static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train) +{ + return train->clock_recovered && train->channel_equalized; +} + +static int drm_dp_link_apply_training(struct drm_dp_link *link) +{ + struct drm_dp_link_train_set *request = &link->train.request; + unsigned int lanes = link->lanes, *vs, *pe, *pc, i; + struct drm_dp_aux *aux = link->aux; + u8 values[4], pattern = 0; + int err; + + err = link->ops->apply_training(link); + if (err < 0) { + DRM_ERROR("failed to apply link training: %d\n", err); + return err; + } + + vs = request->voltage_swing; + pe = request->pre_emphasis; + pc = request->post_cursor; + + /* write currently selected voltage-swing and pre-emphasis levels */ + for (i = 0; i < lanes; i++) + values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) | + DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]); + + err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes); + if (err < 0) { + DRM_ERROR("failed to set training parameters: %d\n", err); + return err; + } + + /* write currently selected post-cursor level (if supported) */ + if (link->revision >= 0x12 && link->rate == 540000) { + values[0] = values[1] = 0; + + for (i = 0; i < lanes; i++) + values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]); + + err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values, + DIV_ROUND_UP(lanes, 2)); + if (err < 0) { + DRM_ERROR("failed to set post-cursor: %d\n", err); + return err; + } + } + + /* write link pattern */ + if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE) + pattern |= DP_LINK_SCRAMBLING_DISABLE; + + pattern |= link->train.pattern; + + err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern); + if (err < 0) { + DRM_ERROR("failed to set training pattern: %d\n", err); + return err; + } + return 0; } + +static void drm_dp_link_train_wait(struct drm_dp_link *link) +{ + unsigned long min = 0; + + switch (link->train.pattern) { + case DP_TRAINING_PATTERN_1: + min = link->aux_rd_interval.cr; + break; + + case DP_TRAINING_PATTERN_2: + case DP_TRAINING_PATTERN_3: + min = link->aux_rd_interval.ce; + break; + + default: + break; + } + + if (min > 0) + usleep_range(min, 2 * min); +} + +static void drm_dp_link_get_adjustments(struct drm_dp_link *link, + u8 status[DP_LINK_STATUS_SIZE]) +{ + struct drm_dp_link_train_set *adjust = &link->train.adjust; + unsigned int i; + + for (i = 0; i < link->lanes; i++) { + adjust->voltage_swing[i] = + drm_dp_get_adjust_request_voltage(status, i) 
>> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + + adjust->pre_emphasis[i] = + drm_dp_get_adjust_request_pre_emphasis(status, i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + adjust->post_cursor[i] = + drm_dp_get_adjust_request_post_cursor(status, i); + } +} + +static void drm_dp_link_train_adjust(struct drm_dp_link_train *train) +{ + struct drm_dp_link_train_set *request = &train->request; + struct drm_dp_link_train_set *adjust = &train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) + if (request->voltage_swing[i] != adjust->voltage_swing[i]) + request->voltage_swing[i] = adjust->voltage_swing[i]; + + for (i = 0; i < 4; i++) + if (request->pre_emphasis[i] != adjust->pre_emphasis[i]) + request->pre_emphasis[i] = adjust->pre_emphasis[i]; + + for (i = 0; i < 4; i++) + if (request->post_cursor[i] != adjust->post_cursor[i]) + request->post_cursor[i] = adjust->post_cursor[i]; +} + +static int drm_dp_link_recover_clock(struct drm_dp_link *link) +{ + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + err = drm_dp_link_apply_training(link); + if (err < 0) + return err; + + drm_dp_link_train_wait(link); + + err = drm_dp_dpcd_read_link_status(link->aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + return err; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) + drm_dp_link_get_adjustments(link, status); + else + link->train.clock_recovered = true; + + return 0; +} + +static int drm_dp_link_clock_recovery(struct drm_dp_link *link) +{ + unsigned int repeat; + int err; + + /* start clock recovery using training pattern 1 */ + link->train.pattern = DP_TRAINING_PATTERN_1; + + for (repeat = 1; repeat < 5; repeat++) { + err = drm_dp_link_recover_clock(link); + if (err < 0) { + DRM_ERROR("failed to recover clock: %d\n", err); + return err; + } + + if (link->train.clock_recovered) + break; + + drm_dp_link_train_adjust(&link->train); + } + + return 0; +} + +static int drm_dp_link_equalize_channel(struct drm_dp_link *link) +{ + struct drm_dp_aux *aux = link->aux; + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + err = drm_dp_link_apply_training(link); + if (err < 0) + return err; + + drm_dp_link_train_wait(link); + + err = drm_dp_dpcd_read_link_status(aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + return err; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + DRM_ERROR("clock recovery lost while equalizing channel\n"); + link->train.clock_recovered = false; + return 0; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) + drm_dp_link_get_adjustments(link, status); + else + link->train.channel_equalized = true; + + return 0; +} + +static int drm_dp_link_channel_equalization(struct drm_dp_link *link) +{ + unsigned int repeat; + int err; + + /* start channel equalization using pattern 2 or 3 */ + if (link->caps.tps3_supported) + link->train.pattern = DP_TRAINING_PATTERN_3; + else + link->train.pattern = DP_TRAINING_PATTERN_2; + + for (repeat = 1; repeat < 5; repeat++) { + err = drm_dp_link_equalize_channel(link); + if (err < 0) { + DRM_ERROR("failed to equalize channel: %d\n", err); + return err; + } + + if (link->train.channel_equalized) + break; + + drm_dp_link_train_adjust(&link->train); + } + + return 0; +} + +static int drm_dp_link_downgrade(struct drm_dp_link *link) +{ + switch (link->rate) { + case 162000: + return -EINVAL; + + case 270000: + link->rate = 162000; + break; + + case 540000: + link->rate = 270000; + return 0; + } + + return 0; +} + +static void drm_dp_link_train_disable(struct drm_dp_link *link) +{ + int 
err; + + link->train.pattern = DP_TRAINING_PATTERN_DISABLE; + + err = drm_dp_link_apply_training(link); + if (err < 0) + DRM_ERROR("failed to disable link training: %d\n", err); +} + +static int drm_dp_link_train_full(struct drm_dp_link *link) +{ + int err; + +retry: + DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? "s" : "", + link->rate / 100); + + err = drm_dp_link_configure(link->aux, link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + + err = drm_dp_link_clock_recovery(link); + if (err < 0) { + DRM_ERROR("clock recovery failed: %d\n", err); + goto out; + } + + if (!link->train.clock_recovered) { + DRM_ERROR("clock recovery failed, downgrading link\n"); + + err = drm_dp_link_downgrade(link); + if (err < 0) + goto out; + + goto retry; + } + + DRM_DEBUG_KMS("clock recovery succeeded\n"); + + err = drm_dp_link_channel_equalization(link); + if (err < 0) { + DRM_ERROR("channel equalization failed: %d\n", err); + goto out; + } + + if (!link->train.channel_equalized) { + DRM_ERROR("channel equalization failed, downgrading link\n"); + + err = drm_dp_link_downgrade(link); + if (err < 0) + goto out; + + goto retry; + } + + DRM_DEBUG_KMS("channel equalization succeeded\n"); + +out: + drm_dp_link_train_disable(link); + return err; +} + +static int drm_dp_link_train_fast(struct drm_dp_link *link) +{ + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? "s" : "", + link->rate / 100); + + err = drm_dp_link_configure(link->aux, link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + + /* transmit training pattern 1 for 500 microseconds */ + link->train.pattern = DP_TRAINING_PATTERN_1; + + err = drm_dp_link_apply_training(link); + if (err < 0) + goto out; + + usleep_range(500, 1000); + + /* transmit training pattern 2 or 3 for 500 microseconds */ + if (link->caps.tps3_supported) + link->train.pattern = DP_TRAINING_PATTERN_3; + else + link->train.pattern = DP_TRAINING_PATTERN_2; + + err = drm_dp_link_apply_training(link); + if (err < 0) + goto out; + + usleep_range(500, 1000); + + err = drm_dp_dpcd_read_link_status(link->aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + goto out; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + DRM_ERROR("clock recovery failed\n"); + err = -EIO; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) { + DRM_ERROR("channel equalization failed\n"); + err = -EIO; + } + +out: + drm_dp_link_train_disable(link); + return err; +} + +/** + * drm_dp_link_train() - perform DisplayPort link training + * @link: a DP link object + * + * Uses the context stored in the DP link object to perform link training. It + * is expected that drivers will call drm_dp_link_probe() to obtain the link + * capabilities before performing link training. + * + * If the sink supports fast link training (no AUX CH handshake) and valid + * training settings are available, this function will try to perform fast + * link training and fall back to full link training on failure. + * + * Returns: 0 on success or a negative error code on failure. 
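+ *
+ * A typical call sequence, sketched here assuming a driver-provided
+ * &drm_dp_link_ops instance (driver_dp_link_ops is a placeholder name)
+ * that implements at least @apply_training:
+ *
+ *	err = drm_dp_link_probe(aux, &link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	link.aux = aux;
+ *	link.ops = &driver_dp_link_ops;
+ *
+ *	err = drm_dp_link_power_up(aux, &link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_train(&link);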
+ */ +int drm_dp_link_train(struct drm_dp_link *link) +{ + int err; + + drm_dp_link_train_init(&link->train); + + if (link->caps.fast_training) { + if (drm_dp_link_train_valid(&link->train)) { + err = drm_dp_link_train_fast(link); + if (err < 0) + DRM_ERROR("fast link training failed: %d\n", + err); + else + return 0; + } else { + DRM_DEBUG_KMS("training parameters not available\n"); + } + } else { + DRM_DEBUG_KMS("fast link training not supported\n"); + } + + err = drm_dp_link_train_full(link); + if (err < 0) + DRM_ERROR("full link training failed: %d\n", err); + + return err; +} diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h index 88842fd25abf..cb12ed0c54e7 100644 --- a/drivers/gpu/drm/tegra/dp.h +++ b/drivers/gpu/drm/tegra/dp.h @@ -7,20 +7,171 @@ #ifndef DRM_TEGRA_DP_H #define DRM_TEGRA_DP_H 1 +#include <linux/types.h> + +struct drm_display_info; +struct drm_display_mode; struct drm_dp_aux; +struct drm_dp_link; + +/** + * struct drm_dp_link_caps - DP link capabilities + */ +struct drm_dp_link_caps { + /** + * @enhanced_framing: + * + * enhanced framing capability (mandatory as of DP 1.2) + */ + bool enhanced_framing; + + /** + * @tps3_supported: + * + * training pattern sequence 3 supported for equalization + */ + bool tps3_supported; + + /** + * @fast_training: + * + * AUX CH handshake not required for link training + */ + bool fast_training; + + /** + * @channel_coding: + * + * ANSI 8B/10B channel coding capability + */ + bool channel_coding; + + /** + * @alternate_scrambler_reset: + * + * eDP alternate scrambler reset capability + */ + bool alternate_scrambler_reset; +}; + +void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest, + const struct drm_dp_link_caps *src); + +/** + * struct drm_dp_link_ops - DP link operations + */ +struct drm_dp_link_ops { + /** + * @apply_training: + */ + int (*apply_training)(struct drm_dp_link *link); + + /** + * @configure: + */ + int (*configure)(struct drm_dp_link *link); +}; + +#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0) +#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3) +#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2)) + +/** + * struct drm_dp_link_train_set - link training settings + * @voltage_swing: per-lane voltage swing + * @pre_emphasis: per-lane pre-emphasis + * @post_cursor: per-lane post-cursor + */ +struct drm_dp_link_train_set { + unsigned int voltage_swing[4]; + unsigned int pre_emphasis[4]; + unsigned int post_cursor[4]; +}; + +/** + * struct drm_dp_link_train - link training state information + * @request: currently requested settings + * @adjust: adjustments requested by sink + * @pattern: currently requested training pattern + * @clock_recovered: flag to track if clock recovery has completed + * @channel_equalized: flag to track if channel equalization has completed + */ +struct drm_dp_link_train { + struct drm_dp_link_train_set request; + struct drm_dp_link_train_set adjust; + + unsigned int pattern; -#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) + bool clock_recovered; + bool channel_equalized; +}; +/** + * struct drm_dp_link - DP link capabilities and configuration + * @revision: DP specification revision supported on the link + * @max_rate: maximum clock rate supported on the link + * @max_lanes: maximum number of lanes supported on the link + * @caps: capabilities supported on the link (see &drm_dp_link_caps) + * @aux_rd_interval: AUX read interval to use for training (in microseconds) + * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...) 
+ * @rate: currently configured link rate + * @lanes: currently configured number of lanes + * @rates: additional supported link rates in kHz (eDP 1.4) + * @num_rates: number of additional supported link rates (eDP 1.4) + */ struct drm_dp_link { unsigned char revision; + unsigned int max_rate; + unsigned int max_lanes; + + struct drm_dp_link_caps caps; + + /** + * @cr: clock recovery read interval + * @ce: channel equalization read interval + */ + struct { + unsigned int cr; + unsigned int ce; + } aux_rd_interval; + + unsigned char edp; + unsigned int rate; - unsigned int num_lanes; - unsigned long capabilities; + unsigned int lanes; + + unsigned long rates[DP_MAX_SUPPORTED_RATES]; + unsigned int num_rates; + + /** + * @ops: DP link operations + */ + const struct drm_dp_link_ops *ops; + + /** + * @aux: DP AUX channel + */ + struct drm_dp_aux *aux; + + /** + * @train: DP link training state + */ + struct drm_dp_link_train train; }; +int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate); +int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate); +void drm_dp_link_update_rates(struct drm_dp_link *link); + int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_choose(struct drm_dp_link *link, + const struct drm_display_mode *mode, + const struct drm_display_info *info); + +void drm_dp_link_train_init(struct drm_dp_link_train *train); +int drm_dp_link_train(struct drm_dp_link *link); #endif diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 1144605c9737..622cdf1ad246 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> @@ -30,10 +31,18 @@ static DEFINE_MUTEX(dpaux_lock); static LIST_HEAD(dpaux_list); +struct tegra_dpaux_soc { + unsigned int cmh; + unsigned int drvz; + unsigned int drvi; +}; + struct tegra_dpaux { struct drm_dp_aux aux; struct device *dev; + const struct tegra_dpaux_soc *soc; + void __iomem *regs; int irq; @@ -121,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, struct tegra_dpaux *dpaux = to_dpaux(aux); unsigned long status; ssize_t ret = 0; + u8 reply = 0; u32 value; /* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. 
*/ @@ -215,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) { case 0x00: - msg->reply = DP_AUX_NATIVE_REPLY_ACK; + reply = DP_AUX_NATIVE_REPLY_ACK; break; case 0x01: - msg->reply = DP_AUX_NATIVE_REPLY_NACK; + reply = DP_AUX_NATIVE_REPLY_NACK; break; case 0x02: - msg->reply = DP_AUX_NATIVE_REPLY_DEFER; + reply = DP_AUX_NATIVE_REPLY_DEFER; break; case 0x04: - msg->reply = DP_AUX_I2C_REPLY_NACK; + reply = DP_AUX_I2C_REPLY_NACK; break; case 0x08: - msg->reply = DP_AUX_I2C_REPLY_DEFER; + reply = DP_AUX_I2C_REPLY_DEFER; break; } @@ -239,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, if (msg->request & DP_AUX_I2C_READ) { size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK; - if (WARN_ON(count != msg->size)) - count = min_t(size_t, count, msg->size); + /* + * There might be a smarter way to do this, but since + * the DP helpers will already retry transactions for + * an -EBUSY return value, simply reuse that instead. + */ + if (count != msg->size) { + ret = -EBUSY; + goto out; + } tegra_dpaux_read_fifo(dpaux, msg->buffer, count); ret = count; } } + msg->reply = reply; + +out: return ret; } @@ -311,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) switch (function) { case DPAUX_PADCTL_FUNC_AUX: - value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) | DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV | DPAUX_HYBRID_PADCTL_MODE_AUX; break; @@ -321,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) case DPAUX_PADCTL_FUNC_I2C: value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV | DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | - DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) | DPAUX_HYBRID_PADCTL_MODE_I2C; break; @@ -437,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) if (!dpaux) return -ENOMEM; + dpaux->soc = of_device_get_match_data(&pdev->dev); INIT_WORK(&dpaux->work, tegra_dpaux_hotplug); init_completion(&dpaux->complete); INIT_LIST_HEAD(&dpaux->list); @@ -494,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return PTR_ERR(dpaux->vdd); } + + dpaux->vdd = NULL; } platform_set_drvdata(pdev, dpaux); @@ -642,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = { SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL) }; +static const struct tegra_dpaux_soc tegra124_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x18, +}; + +static const struct tegra_dpaux_soc tegra210_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x30, +}; + +static const struct tegra_dpaux_soc tegra194_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x2c, +}; + static const struct of_device_id tegra_dpaux_of_match[] = { - { .compatible = "nvidia,tegra194-dpaux", }, - { .compatible = "nvidia,tegra186-dpaux", }, - { .compatible = "nvidia,tegra210-dpaux", }, - { .compatible = "nvidia,tegra124-dpaux", }, + { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc }, + { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc }, + { .compatible = 
"nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc }, + { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc }, { }, }; MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match); @@ -687,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output) output->connector.polled = DRM_CONNECTOR_POLL_HPD; dpaux->output = output; - err = regulator_enable(dpaux->vdd); - if (err < 0) - return err; + if (output->panel) { + enum drm_connector_status status; - timeout = jiffies + msecs_to_jiffies(250); + if (dpaux->vdd) { + err = regulator_enable(dpaux->vdd); + if (err < 0) + return err; + } - while (time_before(jiffies, timeout)) { - enum drm_connector_status status; + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + status = drm_dp_aux_detect(aux); + + if (status == connector_status_connected) + break; - status = drm_dp_aux_detect(aux); - if (status == connector_status_connected) { - enable_irq(dpaux->irq); - return 0; + usleep_range(1000, 2000); } - usleep_range(1000, 2000); + if (status != connector_status_connected) + return -ETIMEDOUT; } - return -ETIMEDOUT; + enable_irq(dpaux->irq); + return 0; } int drm_dp_aux_detach(struct drm_dp_aux *aux) @@ -716,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux) disable_irq(dpaux->irq); - err = regulator_disable(dpaux->vdd); - if (err < 0) - return err; + if (dpaux->output->panel) { + enum drm_connector_status status; - timeout = jiffies + msecs_to_jiffies(250); + if (dpaux->vdd) { + err = regulator_disable(dpaux->vdd); + if (err < 0) + return err; + } - while (time_before(jiffies, timeout)) { - enum drm_connector_status status; + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + status = drm_dp_aux_detect(aux); + + if (status == connector_status_disconnected) + break; - status = drm_dp_aux_detect(aux); - if (status == connector_status_disconnected) { - dpaux->output = NULL; - return 0; + usleep_range(1000, 2000); } - usleep_range(1000, 2000); + if (status != connector_status_disconnected) + return -ETIMEDOUT; + + dpaux->output = NULL; } - return -ETIMEDOUT; + return 0; } enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux) @@ -765,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux) return 0; } - -int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding) -{ - int err; - - err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, - encoding); - if (err < 0) - return err; - - return 0; -} - -int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link, - u8 pattern) -{ - u8 tp = pattern & DP_TRAINING_PATTERN_MASK; - u8 status[DP_LINK_STATUS_SIZE], values[4]; - unsigned int i; - int err; - - err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern); - if (err < 0) - return err; - - if (tp == DP_TRAINING_PATTERN_DISABLE) - return 0; - - for (i = 0; i < link->num_lanes; i++) - values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | - DP_TRAIN_PRE_EMPH_LEVEL_0 | - DP_TRAIN_MAX_SWING_REACHED | - DP_TRAIN_VOLTAGE_SWING_LEVEL_0; - - err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, - link->num_lanes); - if (err < 0) - return err; - - usleep_range(500, 1000); - - err = drm_dp_dpcd_read_link_status(aux, status); - if (err < 0) - return err; - - switch (tp) { - case DP_TRAINING_PATTERN_1: - if (!drm_dp_clock_recovery_ok(status, link->num_lanes)) - return -EAGAIN; - - break; - - case DP_TRAINING_PATTERN_2: - if (!drm_dp_channel_eq_ok(status, link->num_lanes)) - return -EAGAIN; - - break; - - default: - 
dev_err(aux->dev, "unsupported training pattern %u\n", tp); - return -EINVAL; - } - - err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0); - if (err < 0) - return err; - - return 0; -} diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6fb7d74ff553..f455ce71e85d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -20,10 +20,6 @@ #include <drm/drm_prime.h> #include <drm/drm_vblank.h> -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) -#include <asm/dma-iommu.h> -#endif - #include "drm.h" #include "gem.h" @@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = { .atomic_commit_tail = tegra_atomic_commit_tail, }; -static int tegra_drm_load(struct drm_device *drm, unsigned long flags) -{ - struct host1x_device *device = to_host1x_device(drm->dev); - struct tegra_drm *tegra; - int err; - - tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); - if (!tegra) - return -ENOMEM; - - if (iommu_present(&platform_bus_type)) { - tegra->domain = iommu_domain_alloc(&platform_bus_type); - if (!tegra->domain) { - err = -ENOMEM; - goto free; - } - - err = iova_cache_get(); - if (err < 0) - goto domain; - } - - mutex_init(&tegra->clients_lock); - INIT_LIST_HEAD(&tegra->clients); - - drm->dev_private = tegra; - tegra->drm = drm; - - drm_mode_config_init(drm); - - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.allow_fb_modifiers = true; - - drm->mode_config.normalize_zpos = true; - - drm->mode_config.funcs = &tegra_drm_mode_config_funcs; - drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; - - err = tegra_drm_fb_prepare(drm); - if (err < 0) - goto config; - - drm_kms_helper_poll_init(drm); - - err = host1x_device_init(device); - if (err < 0) - goto fbdev; - - if (tegra->domain) { - u64 carveout_start, carveout_end, gem_start, gem_end; - u64 dma_mask = dma_get_mask(&device->dev); - dma_addr_t start, end; - unsigned long order; - - start = tegra->domain->geometry.aperture_start & dma_mask; - end = tegra->domain->geometry.aperture_end & dma_mask; - - gem_start = start; - gem_end = end - CARVEOUT_SZ; - carveout_start = gem_end + 1; - carveout_end = end; - - order = __ffs(tegra->domain->pgsize_bitmap); - init_iova_domain(&tegra->carveout.domain, 1UL << order, - carveout_start >> order); - - tegra->carveout.shift = iova_shift(&tegra->carveout.domain); - tegra->carveout.limit = carveout_end >> tegra->carveout.shift; - - drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); - mutex_init(&tegra->mm_lock); - - DRM_DEBUG("IOMMU apertures:\n"); - DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end); - DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start, - carveout_end); - } - - if (tegra->hub) { - err = tegra_display_hub_prepare(tegra->hub); - if (err < 0) - goto device; - } - - /* - * We don't use the drm_irq_install() helpers provided by the DRM - * core, so we need to set this manually in order to allow the - * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
- */ - drm->irq_enabled = true; - - /* syncpoints are used for full 32-bit hardware VBLANK counters */ - drm->max_vblank_count = 0xffffffff; - - err = drm_vblank_init(drm, drm->mode_config.num_crtc); - if (err < 0) - goto hub; - - drm_mode_config_reset(drm); - - err = tegra_drm_fb_init(drm); - if (err < 0) - goto hub; - - return 0; - -hub: - if (tegra->hub) - tegra_display_hub_cleanup(tegra->hub); -device: - host1x_device_exit(device); -fbdev: - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_free(drm); -config: - drm_mode_config_cleanup(drm); - - if (tegra->domain) { - mutex_destroy(&tegra->mm_lock); - drm_mm_takedown(&tegra->mm); - put_iova_domain(&tegra->carveout.domain); - iova_cache_put(); - } -domain: - if (tegra->domain) - iommu_domain_free(tegra->domain); -free: - kfree(tegra); - return err; -} - -static void tegra_drm_unload(struct drm_device *drm) -{ - struct host1x_device *device = to_host1x_device(drm->dev); - struct tegra_drm *tegra = drm->dev_private; - int err; - - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_exit(drm); - drm_atomic_helper_shutdown(drm); - drm_mode_config_cleanup(drm); - - err = host1x_device_exit(device); - if (err < 0) - return; - - if (tegra->domain) { - mutex_destroy(&tegra->mm_lock); - drm_mm_takedown(&tegra->mm); - put_iova_domain(&tegra->carveout.domain); - iova_cache_put(); - iommu_domain_free(tegra->domain); - } - - kfree(tegra); -} - static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) { struct tegra_drm_file *fpriv; @@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, if (err < 0) return err; + dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE; + dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf); if (!dest->cmdbuf.bo) return -ENOENT; @@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor) static struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, - .load = tegra_drm_load, - .unload = tegra_drm_unload, .open = tegra_drm_open, .postclose = tegra_drm_postclose, .lastclose = drm_fb_helper_lastclose, @@ -1068,57 +902,61 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra, return 0; } -struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, - bool shared) +int host1x_client_iommu_attach(struct host1x_client *client) { + struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev); struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_drm *tegra = drm->dev_private; struct iommu_group *group = NULL; int err; + /* + * If the host1x client is already attached to an IOMMU domain that is + * not the shared IOMMU domain, don't try to attach it to a different + * domain. This allows using the IOMMU-backed DMA API. 
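+	 * Such clients get their buffers mapped through the DMA API,
+	 * using the SG tables returned by host1x_bo_pin(), rather than
+	 * through the shared domain.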
+ */ + if (domain && domain != tegra->domain) + return 0; + if (tegra->domain) { group = iommu_group_get(client->dev); - if (!group) { - dev_err(client->dev, "failed to get IOMMU group\n"); - return ERR_PTR(-ENODEV); - } + if (!group) + return -ENODEV; - if (!shared || (shared && (group != tegra->group))) { -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) - if (client->dev->archdata.mapping) { - struct dma_iommu_mapping *mapping = - to_dma_iommu_mapping(client->dev); - arm_iommu_detach_device(client->dev); - arm_iommu_release_mapping(mapping); - } -#endif + if (domain != tegra->domain) { err = iommu_attach_group(tegra->domain, group); if (err < 0) { iommu_group_put(group); - return ERR_PTR(err); + return err; } - - if (shared && !tegra->group) - tegra->group = group; } + + tegra->use_explicit_iommu = true; } - return group; + client->group = group; + + return 0; } -void host1x_client_iommu_detach(struct host1x_client *client, - struct iommu_group *group) +void host1x_client_iommu_detach(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_drm *tegra = drm->dev_private; + struct iommu_domain *domain; - if (group) { - if (group == tegra->group) { - iommu_detach_group(tegra->domain, group); - tegra->group = NULL; - } + if (client->group) { + /* + * Devices that are part of the same group may no longer be + * attached to a domain at this point because their group may + * have been detached by an earlier client. + */ + domain = iommu_get_domain_for_dev(client->dev); + if (domain) + iommu_detach_group(tegra->domain, client->group); - iommu_group_put(group); + iommu_group_put(client->group); + client->group = NULL; } } @@ -1202,6 +1040,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, static int host1x_drm_probe(struct host1x_device *dev) { struct drm_driver *driver = &tegra_drm_driver; + struct iommu_domain *domain; + struct tegra_drm *tegra; struct drm_device *drm; int err; @@ -1209,18 +1049,180 @@ static int host1x_drm_probe(struct host1x_device *dev) if (IS_ERR(drm)) return PTR_ERR(drm); + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); + if (!tegra) { + err = -ENOMEM; + goto put; + } + + /* + * If the Tegra DRM clients are backed by an IOMMU, push buffers are + * likely to be allocated beyond the 32-bit boundary if sufficient + * system memory is available. This is problematic on earlier Tegra + * generations where host1x supports a maximum of 32 address bits in + * the GATHER opcode. In this case, unless host1x is behind an IOMMU + * as well it won't be able to process buffers allocated beyond the + * 32-bit boundary. + * + * The DMA API will use bounce buffers in this case, so that could + * perhaps still be made to work, even if less efficient, but there + * is another catch: in order to perform cache maintenance on pages + * allocated for discontiguous buffers we need to map and unmap the + * SG table representing these buffers. This is fine for something + * small like a push buffer, but it exhausts the bounce buffer pool + * (typically on the order of a few MiB) for framebuffers (many MiB + * for any modern resolution). + * + * Work around this by making sure that Tegra DRM clients only use + * an IOMMU if the parent host1x also uses an IOMMU. + * + * Note that there's still a small gap here that we don't cover: if + * the DMA API is backed by an IOMMU there's no way to control which + * device is attached to an IOMMU and which isn't, except via wiring + * up the device tree appropriately. 
This is considered a problem + * of integration, so care must be taken for the DT to be consistent. + */ + domain = iommu_get_domain_for_dev(drm->dev->parent); + + if (domain && iommu_present(&platform_bus_type)) { + tegra->domain = iommu_domain_alloc(&platform_bus_type); + if (!tegra->domain) { + err = -ENOMEM; + goto free; + } + + err = iova_cache_get(); + if (err < 0) + goto domain; + } + + mutex_init(&tegra->clients_lock); + INIT_LIST_HEAD(&tegra->clients); + dev_set_drvdata(&dev->dev, drm); + drm->dev_private = tegra; + tegra->drm = drm; + + drm_mode_config_init(drm); + + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + + drm->mode_config.allow_fb_modifiers = true; + + drm->mode_config.normalize_zpos = true; - err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false); + drm->mode_config.funcs = &tegra_drm_mode_config_funcs; + drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; + + err = tegra_drm_fb_prepare(drm); if (err < 0) - goto put; + goto config; + + drm_kms_helper_poll_init(drm); + + err = host1x_device_init(dev); + if (err < 0) + goto fbdev; + + if (tegra->use_explicit_iommu) { + u64 carveout_start, carveout_end, gem_start, gem_end; + u64 dma_mask = dma_get_mask(&dev->dev); + dma_addr_t start, end; + unsigned long order; + + start = tegra->domain->geometry.aperture_start & dma_mask; + end = tegra->domain->geometry.aperture_end & dma_mask; + + gem_start = start; + gem_end = end - CARVEOUT_SZ; + carveout_start = gem_end + 1; + carveout_end = end; + + order = __ffs(tegra->domain->pgsize_bitmap); + init_iova_domain(&tegra->carveout.domain, 1UL << order, + carveout_start >> order); + + tegra->carveout.shift = iova_shift(&tegra->carveout.domain); + tegra->carveout.limit = carveout_end >> tegra->carveout.shift; + + drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); + mutex_init(&tegra->mm_lock); + + DRM_DEBUG_DRIVER("IOMMU apertures:\n"); + DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end); + DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start, + carveout_end); + } else if (tegra->domain) { + iommu_domain_free(tegra->domain); + tegra->domain = NULL; + iova_cache_put(); + } + + if (tegra->hub) { + err = tegra_display_hub_prepare(tegra->hub); + if (err < 0) + goto device; + } + + /* + * We don't use the drm_irq_install() helpers provided by the DRM + * core, so we need to set this manually in order to allow the + * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
+ */ + drm->irq_enabled = true; + + /* syncpoints are used for full 32-bit hardware VBLANK counters */ + drm->max_vblank_count = 0xffffffff; + + err = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (err < 0) + goto hub; + + drm_mode_config_reset(drm); + + err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", + false); + if (err < 0) + goto hub; + + err = tegra_drm_fb_init(drm); + if (err < 0) + goto hub; err = drm_dev_register(drm, 0); if (err < 0) - goto put; + goto fb; return 0; +fb: + tegra_drm_fb_exit(drm); +hub: + if (tegra->hub) + tegra_display_hub_cleanup(tegra->hub); +device: + if (tegra->domain) { + mutex_destroy(&tegra->mm_lock); + drm_mm_takedown(&tegra->mm); + put_iova_domain(&tegra->carveout.domain); + iova_cache_put(); + } + + host1x_device_exit(dev); +fbdev: + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_free(drm); +config: + drm_mode_config_cleanup(drm); +domain: + if (tegra->domain) + iommu_domain_free(tegra->domain); +free: + kfree(tegra); put: drm_dev_put(drm); return err; @@ -1229,8 +1231,32 @@ put: static int host1x_drm_remove(struct host1x_device *dev) { struct drm_device *drm = dev_get_drvdata(&dev->dev); + struct tegra_drm *tegra = drm->dev_private; + int err; drm_dev_unregister(drm); + + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_exit(drm); + drm_atomic_helper_shutdown(drm); + drm_mode_config_cleanup(drm); + + if (tegra->hub) + tegra_display_hub_cleanup(tegra->hub); + + err = host1x_device_exit(dev); + if (err < 0) + dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err); + + if (tegra->domain) { + mutex_destroy(&tegra->mm_lock); + drm_mm_takedown(&tegra->mm); + put_iova_domain(&tegra->carveout.domain); + iova_cache_put(); + iommu_domain_free(tegra->domain); + } + + kfree(tegra); drm_dev_put(drm); return 0; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 29911eff9ceb..d941553f7a3d 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -36,7 +36,7 @@ struct tegra_drm { struct drm_device *drm; struct iommu_domain *domain; - struct iommu_group *group; + bool use_explicit_iommu; struct mutex mm_lock; struct drm_mm mm; @@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra, struct tegra_drm_client *client); int tegra_drm_unregister_client(struct tegra_drm *tegra, struct tegra_drm_client *client); -struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, - bool shared); -void host1x_client_iommu_detach(struct host1x_client *client, - struct iommu_group *group); +int host1x_client_iommu_attach(struct host1x_client *client); +void host1x_client_iommu_detach(struct host1x_client *client); int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); int tegra_drm_exit(struct tegra_drm *tegra); @@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector); void tegra_output_encoder_destroy(struct drm_encoder *encoder); /* from dpaux.c */ -struct drm_dp_link; - struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np); enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux); int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output); int drm_dp_aux_detach(struct drm_dp_aux *aux); int drm_dp_aux_enable(struct drm_dp_aux *aux); int drm_dp_aux_disable(struct drm_dp_aux *aux); -int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding); -int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link, - u8 pattern); /* from fb.c */ struct tegra_bo 
*tegra_fb_get_plane(struct drm_framebuffer *framebuffer, diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c index f49ad36e24db..56edef06c48e 100644 --- a/drivers/gpu/drm/tegra/falcon.c +++ b/drivers/gpu/drm/tegra/falcon.c @@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon, static void falcon_copy_firmware_image(struct falcon *falcon, const struct firmware *firmware) { - u32 *firmware_vaddr = falcon->firmware.vaddr; - dma_addr_t daddr; + u32 *virt = falcon->firmware.virt; size_t i; - int err; /* copy the whole thing taking into account endianness */ for (i = 0; i < firmware->size / sizeof(u32); i++) - firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]); - - /* ensure that caches are flushed and falcon can see the firmware */ - daddr = dma_map_single(falcon->dev, firmware_vaddr, - falcon->firmware.size, DMA_TO_DEVICE); - err = dma_mapping_error(falcon->dev, daddr); - if (err) { - dev_err(falcon->dev, "failed to map firmware: %d\n", err); - return; - } - dma_sync_single_for_device(falcon->dev, daddr, - falcon->firmware.size, DMA_TO_DEVICE); - dma_unmap_single(falcon->dev, daddr, falcon->firmware.size, - DMA_TO_DEVICE); + virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]); } static int falcon_parse_firmware_image(struct falcon *falcon) { - struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr; + struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt; struct falcon_fw_os_header_v1 *os; /* endian problems would show up right here */ @@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon) return -EINVAL; } - os = falcon->firmware.vaddr + bin->os_header_offset; + os = falcon->firmware.virt + bin->os_header_offset; falcon->firmware.bin_data.size = bin->os_size; falcon->firmware.bin_data.offset = bin->os_data_offset; @@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name) if (err < 0) return err; + falcon->firmware.size = falcon->firmware.firmware->size; + return 0; } @@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon) const struct firmware *firmware = falcon->firmware.firmware; int err; - falcon->firmware.size = firmware->size; - - /* allocate iova space for the firmware */ - falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size, - &falcon->firmware.paddr); - if (IS_ERR(falcon->firmware.vaddr)) { - dev_err(falcon->dev, "DMA memory mapping failed\n"); - return PTR_ERR(falcon->firmware.vaddr); - } - /* copy firmware image into local area. 
this also ensures endianness */ falcon_copy_firmware_image(falcon, firmware); @@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon) err = falcon_parse_firmware_image(falcon); if (err < 0) { dev_err(falcon->dev, "failed to parse firmware image\n"); - goto err_setup_firmware_image; + return err; } release_firmware(firmware); falcon->firmware.firmware = NULL; return 0; - -err_setup_firmware_image: - falcon->ops->free(falcon, falcon->firmware.size, - falcon->firmware.paddr, falcon->firmware.vaddr); - - return err; } int falcon_init(struct falcon *falcon) { - /* check mandatory ops */ - if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free) - return -EINVAL; - - falcon->firmware.vaddr = NULL; + falcon->firmware.virt = NULL; return 0; } void falcon_exit(struct falcon *falcon) { - if (falcon->firmware.firmware) { + if (falcon->firmware.firmware) release_firmware(falcon->firmware.firmware); - falcon->firmware.firmware = NULL; - } - - if (falcon->firmware.vaddr) { - falcon->ops->free(falcon, falcon->firmware.size, - falcon->firmware.paddr, - falcon->firmware.vaddr); - falcon->firmware.vaddr = NULL; - } } int falcon_boot(struct falcon *falcon) @@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon) u32 value; int err; - if (!falcon->firmware.vaddr) + if (!falcon->firmware.virt) return -EINVAL; err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value, @@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon) falcon_writel(falcon, 0, FALCON_DMACTL); /* setup the address of the binary data so Falcon can access it later */ - falcon_writel(falcon, (falcon->firmware.paddr + + falcon_writel(falcon, (falcon->firmware.iova + falcon->firmware.bin_data.offset) >> 8, FALCON_DMATRFBASE); diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h index 3d1243217410..c56ee32d92ee 100644 --- a/drivers/gpu/drm/tegra/falcon.h +++ b/drivers/gpu/drm/tegra/falcon.h @@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 { u32 data_size; }; -struct falcon; - -struct falcon_ops { - void *(*alloc)(struct falcon *falcon, size_t size, - dma_addr_t *paddr); - void (*free)(struct falcon *falcon, size_t size, - dma_addr_t paddr, void *vaddr); -}; - struct falcon_firmware_section { unsigned long offset; size_t size; @@ -93,8 +84,9 @@ struct falcon_firmware { const struct firmware *firmware; /* Raw firmware data */ - dma_addr_t paddr; - void *vaddr; + dma_addr_t iova; + dma_addr_t phys; + void *virt; size_t size; /* Parsed firmware information */ @@ -107,8 +99,6 @@ struct falcon { /* Set by falcon client */ struct device *dev; void __iomem *regs; - const struct falcon_ops *ops; - void *data; struct falcon_firmware firmware; }; diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 9d86081bf6c8..84f0e01e3428 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, } } - drm->mode_config.fb_base = (resource_size_t)bo->paddr; + drm->mode_config.fb_base = (resource_size_t)bo->iova; info->screen_base = (void __iomem *)bo->vaddr + offset; info->screen_size = size; - info->fix.smem_start = (unsigned long)(bo->paddr + offset); + info->fix.smem_start = (unsigned long)(bo->iova + offset); info->fix.smem_len = size; return 0; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index d266b66c1292..1237df157e05 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -27,17 +27,98 @@ static void tegra_bo_put(struct host1x_bo *bo) 
drm_gem_object_put_unlocked(&obj->gem); } -static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) +/* XXX move this into lib/scatterlist.c? */ +static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg, + unsigned int nents, gfp_t gfp_mask) +{ + struct scatterlist *dst; + unsigned int i; + int err; + + err = sg_alloc_table(sgt, nents, gfp_mask); + if (err < 0) + return err; + + dst = sgt->sgl; + + for (i = 0; i < nents; i++) { + sg_set_page(dst, sg_page(sg), sg->length, 0); + dst = sg_next(dst); + sg = sg_next(sg); + } + + return 0; +} + +static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, + dma_addr_t *phys) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct sg_table *sgt; + int err; - *sgt = obj->sgt; + /* + * If we've manually mapped the buffer object through the IOMMU, make + * sure to return the IOVA address of our mapping. + */ + if (phys && obj->mm) { + *phys = obj->iova; + return NULL; + } - return obj->paddr; + /* + * If we don't have a mapping for this buffer yet, return an SG table + * so that host1x can do the mapping for us via the DMA API. + */ + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + if (obj->pages) { + /* + * If the buffer object was allocated from the explicit IOMMU + * API code paths, construct an SG table from the pages. + */ + err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages, + 0, obj->gem.size, GFP_KERNEL); + if (err < 0) + goto free; + } else if (obj->sgt) { + /* + * If the buffer object already has an SG table but no pages + * were allocated for it, it means the buffer was imported and + * the SG table needs to be copied to avoid overwriting any + * other potential users of the original SG table. + */ + err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents, + GFP_KERNEL); + if (err < 0) + goto free; + } else { + /* + * If the buffer object had no pages allocated and if it was + * not imported, it had to be allocated with the DMA API, so + * the DMA API helper can be used. 
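+	 * dma_get_sgtable() constructs an SG table describing the pages
+	 * that back the coherent allocation.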
+ */ + err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova, + obj->gem.size); + if (err < 0) + goto free; + } + + return sgt; + +free: + kfree(sgt); + return ERR_PTR(err); } -static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) +static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt) { + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } } static void *tegra_bo_mmap(struct host1x_bo *bo) @@ -105,9 +186,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) goto unlock; } - bo->paddr = bo->mm->start; + bo->iova = bo->mm->start; - bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, + bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl, bo->sgt->nents, prot); if (!bo->size) { dev_err(tegra->drm->dev, "failed to map buffer\n"); @@ -133,7 +214,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) return 0; mutex_lock(&tegra->mm_lock); - iommu_unmap(tegra->domain, bo->paddr, bo->size); + iommu_unmap(tegra->domain, bo->iova, bo->size); drm_mm_remove_node(bo->mm); mutex_unlock(&tegra->mm_lock); @@ -181,7 +262,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) sg_free_table(bo->sgt); kfree(bo->sgt); } else if (bo->vaddr) { - dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); + dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova); } } @@ -236,7 +317,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) } else { size_t size = bo->gem.size; - bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr, + bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { dev_err(drm->dev, @@ -331,13 +412,6 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm, err = tegra_bo_iommu_map(tegra, bo); if (err < 0) goto detach; - } else { - if (bo->sgt->nents > 1) { - err = -EINVAL; - goto detach; - } - - bo->paddr = sg_dma_address(bo->sgt->sgl); } bo->gem.import_attach = attach; @@ -433,7 +507,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma) vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; - err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr, + err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova, gem->size); if (err < 0) { drm_gem_vm_close(vma); @@ -480,25 +554,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, return NULL; if (bo->pages) { - struct scatterlist *sg; - unsigned int i; - - if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) - goto free; - - for_each_sg(sgt->sgl, sg, bo->num_pages, i) - sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); - - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages, + 0, gem->size, GFP_KERNEL) < 0) goto free; } else { - if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova, + gem->size) < 0) goto free; - - sg_dma_address(sgt->sgl) = bo->paddr; - sg_dma_len(sgt->sgl) = gem->size; } + if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + goto free; + return sgt; free: diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index 83ffb1e14ca3..fafb5724499b 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -31,7 +31,7 @@ struct tegra_bo { struct host1x_bo base; unsigned long flags; struct sg_table *sgt; - dma_addr_t paddr; + dma_addr_t iova; void *vaddr; struct drm_mm_node *mm; diff --git a/drivers/gpu/drm/tegra/gr2d.c 
b/drivers/gpu/drm/tegra/gr2d.c index 641299cc85b8..1fc4e56c7cc5 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -17,7 +17,6 @@ struct gr2d_soc { }; struct gr2d { - struct iommu_group *group; struct tegra_drm_client client; struct host1x_channel *channel; struct clk *clk; @@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client) struct gr2d *gr2d = to_gr2d(drm); int err; - gr2d->channel = host1x_channel_request(client->dev); + gr2d->channel = host1x_channel_request(client); if (!gr2d->channel) return -ENOMEM; @@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client) goto put; } - gr2d->group = host1x_client_iommu_attach(client, false); - if (IS_ERR(gr2d->group)) { - err = PTR_ERR(gr2d->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); goto free; } @@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client) return 0; detach: - host1x_client_iommu_detach(client, gr2d->group); + host1x_client_iommu_detach(client); free: host1x_syncpt_free(client->syncpts[0]); put: @@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client) if (err < 0) return err; - host1x_client_iommu_detach(client, gr2d->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(gr2d->channel); diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 8b9a35b1cbb3..24fae0f64032 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -23,7 +23,6 @@ struct gr3d_soc { }; struct gr3d { - struct iommu_group *group; struct tegra_drm_client client; struct host1x_channel *channel; struct clk *clk_secondary; @@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client) struct gr3d *gr3d = to_gr3d(drm); int err; - gr3d->channel = host1x_channel_request(client->dev); + gr3d->channel = host1x_channel_request(client); if (!gr3d->channel) return -ENOMEM; @@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client) goto put; } - gr3d->group = host1x_client_iommu_attach(client, false); - if (IS_ERR(gr3d->group)) { - err = PTR_ERR(gr3d->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); goto free; } @@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client) return 0; detach: - host1x_client_iommu_detach(client, gr3d->group); + host1x_client_iommu_detach(client); free: host1x_syncpt_free(client->syncpts[0]); put: @@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client) if (err < 0) return err; - host1x_client_iommu_detach(client, gr3d->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(gr3d->channel); diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 839b49c40e51..47d985ac7cd7 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, unsigned int zpos = plane->state->normalized_zpos; struct drm_framebuffer *fb = plane->state->fb; struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_bo *bo; dma_addr_t base; u32 value; @@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, /* disable compression */ tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL); - bo = tegra_fb_get_plane(fb, 0); - base = bo->paddr; + base = state->iova[0] + fb->offsets[0]; 
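+	/*
+	 * The IOVA above is filled in by tegra_plane_prepare_fb(): either
+	 * the DMA address of the framebuffer's SG table mapping or, if the
+	 * client is attached to the explicit IOMMU domain, the buffer
+	 * object's IOVA.
+	 */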
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH); tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS); @@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_shared_plane_atomic_check, .atomic_update = tegra_shared_plane_atomic_update, .atomic_disable = tegra_shared_plane_atomic_disable, @@ -605,11 +605,8 @@ static struct tegra_display_hub_state * tegra_display_hub_get_state(struct tegra_display_hub *hub, struct drm_atomic_state *state) { - struct drm_device *drm = dev_get_drvdata(hub->client.parent); struct drm_private_state *priv; - WARN_ON(!drm_modeset_is_locked(&drm->mode_config.connection_mutex)); - priv = drm_atomic_get_private_obj_state(state, &hub->base); if (IS_ERR(priv)) return ERR_CAST(priv); diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 4948373d8c75..80ddde4adbae 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force) void tegra_output_connector_destroy(struct drm_connector *connector) { + struct tegra_output *output = connector_to_output(connector); + + if (output->cec) + cec_notifier_conn_unregister(output->cec); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output) disable_irq(output->hpd_irq); } - output->cec = cec_notifier_get(output->dev); - if (!output->cec) - return -ENOMEM; - return 0; } void tegra_output_remove(struct tegra_output *output) { - if (output->cec) - cec_notifier_put(output->cec); - if (output->hpd_gpio) free_irq(output->hpd_irq, output); @@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output) int tegra_output_init(struct drm_device *drm, struct tegra_output *output) { + int connector_type; int err; if (output->panel) { @@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) if (output->hpd_gpio) enable_irq(output->hpd_irq); + connector_type = output->connector.connector_type; + /* + * Create a CEC notifier for HDMI connector. 
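+	 * cec_notifier_conn_register() ties the notifier to the connector
+	 * information; the matching cec_notifier_conn_unregister() call is
+	 * made from tegra_output_connector_destroy().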
+ */ + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_HDMIB) { + struct cec_connector_info conn_info; + + cec_fill_conn_info_from_drm(&conn_info, &output->connector); + output->cec = cec_notifier_conn_register(output->dev, NULL, + &conn_info); + if (!output->cec) + return -ENOMEM; + } + return 0; } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 6bab71d6e81d..cadcdd9ea427 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include "dc.h" @@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane) { struct tegra_plane *p = to_tegra_plane(plane); struct tegra_plane_state *state; + unsigned int i; if (plane->state) __drm_atomic_helper_plane_destroy_state(plane->state); @@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane) plane->state->plane = plane; plane->state->zpos = p->index; plane->state->normalized_zpos = p->index; + + for (i = 0; i < 3; i++) + state->iova[i] = DMA_MAPPING_ERROR; } } @@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane) for (i = 0; i < 2; i++) copy->blending[i] = state->blending[i]; + for (i = 0; i < 3; i++) { + copy->iova[i] = DMA_MAPPING_ERROR; + copy->sgt[i] = NULL; + } + return ©->base; } @@ -95,6 +105,111 @@ const struct drm_plane_funcs tegra_plane_funcs = { .format_mod_supported = tegra_plane_format_mod_supported, }; +static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) +{ + unsigned int i; + int err; + + for (i = 0; i < state->base.fb->format->num_planes; i++) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + + if (!dc->client.group) { + struct sg_table *sgt; + + sgt = host1x_bo_pin(dc->dev, &bo->base, NULL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + + err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + if (err == 0) { + err = -ENOMEM; + goto unpin; + } + + /* + * The display controller needs contiguous memory, so + * fail if the buffer is discontiguous and we fail to + * map its SG table to a single contiguous chunk of + * I/O virtual memory. 
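+			 * dma_map_sg() returns the number of coalesced
+			 * segments, so any value greater than one here means
+			 * the buffer could not be mapped contiguously.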
+ */ + if (err > 1) { + err = -EINVAL; + goto unpin; + } + + state->iova[i] = sg_dma_address(sgt->sgl); + state->sgt[i] = sgt; + } else { + state->iova[i] = bo->iova; + } + } + + return 0; + +unpin: + dev_err(dc->dev, "failed to map plane %u: %d\n", i, err); + + while (i--) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + struct sg_table *sgt = state->sgt[i]; + + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); + + state->iova[i] = DMA_MAPPING_ERROR; + state->sgt[i] = NULL; + } + + return err; +} + +static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) +{ + unsigned int i; + + for (i = 0; i < state->base.fb->format->num_planes; i++) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + + if (!dc->client.group) { + struct sg_table *sgt = state->sgt[i]; + + if (sgt) { + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); + } + } + + state->iova[i] = DMA_MAPPING_ERROR; + state->sgt[i] = NULL; + } +} + +int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_dc *dc = to_tegra_dc(state->crtc); + + if (!state->fb) + return 0; + + drm_gem_fb_prepare_fb(plane, state); + + return tegra_dc_pin(dc, to_tegra_plane_state(state)); +} + +void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_dc *dc = to_tegra_dc(state->crtc); + + if (dc) + tegra_dc_unpin(dc, to_tegra_plane_state(state)); +} + int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state) { diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h index 510c394e6d9a..a158a915109a 100644 --- a/drivers/gpu/drm/tegra/plane.h +++ b/drivers/gpu/drm/tegra/plane.h @@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state { struct tegra_plane_state { struct drm_plane_state base; + struct sg_table *sgt[3]; + dma_addr_t iova[3]; + struct tegra_bo_tiling tiling; u32 format; u32 swap; @@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state) extern const struct drm_plane_funcs tegra_plane_funcs; +int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state); +void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state); + int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index fbbb974c1e1a..a68d3b36b972 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -371,10 +371,11 @@ struct tegra_sor_regs { }; struct tegra_sor_soc { - bool supports_edp; bool supports_lvds; bool supports_hdmi; bool supports_dp; + bool supports_audio; + bool supports_hdcp; const struct tegra_sor_regs *regs; bool has_nvdisplay; @@ -383,6 +384,12 @@ struct tegra_sor_soc { unsigned int num_settings; const u8 *xbar_cfg; + const u8 *lane_map; + + const u8 (*voltage_swing)[4][4]; + const u8 (*pre_emphasis)[4][4]; + const u8 (*post_cursor)[4][4]; + const u8 (*tx_pu)[4][4]; }; struct tegra_sor; @@ -391,6 +398,8 @@ struct tegra_sor_ops { const char *name; int (*probe)(struct tegra_sor *sor); int (*remove)(struct tegra_sor *sor); + void (*audio_enable)(struct tegra_sor *sor); + void (*audio_disable)(struct tegra_sor *sor); }; struct tegra_sor { @@ -413,6 +422,7 @@ struct tegra_sor { u8 xbar_cfg[5]; + struct drm_dp_link link; struct drm_dp_aux *aux; struct drm_info_list *debugfs_files; @@ -515,10 +525,19 
@@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw) return container_of(hw, struct tegra_clk_sor_pad, hw); } -static const char * const tegra_clk_sor_pad_parents[] = { - "pll_d2_out0", "pll_dp" +static const char * const tegra_clk_sor_pad_parents[2][2] = { + { "pll_d_out0", "pll_dp" }, + { "pll_d2_out0", "pll_dp" }, }; +/* + * Implementing ->set_parent() here isn't really required because the parent + * will be explicitly selected in the driver code via the DP_CLK_SEL mux in + * the SOR_CLK_CNTRL register. This is primarily for compatibility with the + * Tegra186 and later SoC generations where the BPMP implements this clock + * and doesn't expose the mux via the common clock framework. + */ + static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index) { struct tegra_clk_sor_pad *pad = to_pad(hw); @@ -587,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, init.name = name; init.flags = 0; - init.parent_names = tegra_clk_sor_pad_parents; - init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents); + init.parent_names = tegra_clk_sor_pad_parents[sor->index]; + init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]); init.ops = &tegra_clk_sor_pad_ops; pad->hw.init = &init; @@ -598,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, return clk; } -static int tegra_sor_dp_train_fast(struct tegra_sor *sor, - struct drm_dp_link *link) +static void tegra_sor_filter_rates(struct tegra_sor *sor) { + struct drm_dp_link *link = &sor->link; unsigned int i; - u8 pattern; + + /* Tegra only supports RBR, HBR and HBR2 */ + for (i = 0; i < link->num_rates; i++) { + switch (link->rates[i]) { + case 1620000: + case 2700000: + case 5400000: + break; + + default: + DRM_DEBUG_KMS("link rate %lu kHz not supported\n", + link->rates[i]); + link->rates[i] = 0; + break; + } + } + + drm_dp_link_update_rates(link); +} + +static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes) +{ + unsigned long timeout; u32 value; - int err; - /* setup lane parameters */ - value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE2(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE1(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE0(0x40); - tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0); + /* + * Clear or set the PD_TXD bit corresponding to each lane, depending + * on whether it is used or not. 
+ */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) | - SOR_LANE_PREEMPHASIS_LANE2(0x0f) | - SOR_LANE_PREEMPHASIS_LANE1(0x0f) | - SOR_LANE_PREEMPHASIS_LANE0(0x0f); - tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0); + if (lanes <= 2) + value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2])); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]); - value = SOR_LANE_POSTCURSOR_LANE3(0x00) | - SOR_LANE_POSTCURSOR_LANE2(0x00) | - SOR_LANE_POSTCURSOR_LANE1(0x00) | - SOR_LANE_POSTCURSOR_LANE0(0x00); - tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0); + if (lanes <= 1) + value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]); - /* disable LVDS mode */ - tegra_sor_writel(sor, 0, SOR_LVDS); + if (lanes == 0) + value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]); + + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | + SOR_LANE_SEQ_CTL_POWER_STATE_UP; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(250, 1000); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + return 0; +} + +static int tegra_sor_power_down_lanes(struct tegra_sor *sor) +{ + unsigned long timeout; + u32 value; + + /* power down all lanes */ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_TX_PU_ENABLE; - value &= ~SOR_DP_PADCTL_TX_PU_MASK; - value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? 
*/ + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | + SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + return 0; +} + +static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes) +{ + u32 value; + + /* pre-charge all used lanes */ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | - SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0; + + if (lanes <= 2) + value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2])); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]); + + if (lanes <= 1) + value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]); + + if (lanes == 0) + value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]); + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - usleep_range(10, 100); + usleep_range(15, 100); value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0); tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); +} - err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B); - if (err < 0) - return err; +static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor) +{ + u32 mask = 0x08, adj = 0, value; + + /* enable pad calibration logic */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value &= ~SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value |= SOR_PLL1_TMDS_TERM; + tegra_sor_writel(sor, value, sor->soc->regs->pll1); + + while (mask) { + adj |= mask; + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); + + usleep_range(100, 200); - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_NONE | - SOR_DP_TPG_PATTERN_TRAIN1; - value = (value << 8) | lane; + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + if (value & SOR_PLL1_TERM_COMPOUT) + adj &= ~mask; + + mask >>= 1; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - pattern = DP_TRAINING_PATTERN_1; + /* disable pad calibration logic */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value |= SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); +} - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) - return err; +static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link) +{ + 
struct tegra_sor *sor = container_of(link, struct tegra_sor, link); + u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0; + const struct tegra_sor_soc *soc = sor->soc; + u32 pattern = 0, tx_pu = 0, value; + unsigned int i; - value = tegra_sor_readl(sor, SOR_DP_SPARE0); - value |= SOR_DP_SPARE_SEQ_ENABLE; - value &= ~SOR_DP_SPARE_PANEL_INTERNAL; - value |= SOR_DP_SPARE_MACRO_SOR_CLK; - tegra_sor_writel(sor, value, SOR_DP_SPARE0); + for (value = 0, i = 0; i < link->lanes; i++) { + u8 vs = link->train.request.voltage_swing[i]; + u8 pe = link->train.request.pre_emphasis[i]; + u8 pc = link->train.request.post_cursor[i]; + u8 shift = sor->soc->lane_map[i] << 3; + + voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift; + pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift; + post_cursor |= soc->post_cursor[pc][vs][pe] << shift; - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_NONE | - SOR_DP_TPG_PATTERN_TRAIN2; - value = (value << 8) | lane; + if (sor->soc->tx_pu[pc][vs][pe] > tx_pu) + tx_pu = sor->soc->tx_pu[pc][vs][pe]; + + switch (link->train.pattern) { + case DP_TRAINING_PATTERN_DISABLE: + value = SOR_DP_TPG_SCRAMBLER_GALIOS | + SOR_DP_TPG_PATTERN_NONE; + break; + + case DP_TRAINING_PATTERN_1: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN1; + break; + + case DP_TRAINING_PATTERN_2: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN2; + break; + + case DP_TRAINING_PATTERN_3: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN3; + break; + + default: + return -EINVAL; + } + + if (link->caps.channel_coding) + value |= SOR_DP_TPG_CHANNEL_CODING; + + pattern = pattern << 8 | value; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0); + tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0); - pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2; + if (link->caps.tps3_supported) + tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0); - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) - return err; + tegra_sor_writel(sor, pattern, SOR_DP_TPG); + + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value &= ~SOR_DP_PADCTL_TX_PU_MASK; + value |= SOR_DP_PADCTL_TX_PU_ENABLE; + value |= SOR_DP_PADCTL_TX_PU(tx_pu); + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_sor_dp_link_configure(struct drm_dp_link *link) +{ + struct tegra_sor *sor = container_of(link, struct tegra_sor, link); + unsigned int rate, lanes; + u32 value; + int err; + + rate = drm_dp_link_rate_to_bw_code(link->rate); + lanes = link->lanes; - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; + /* configure link speed and lane count */ + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; + value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); + + if (link->caps.enhanced_framing) + value |= SOR_DP_LINKCTL_ENHANCED_FRAME; + + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + usleep_range(400, 1000); + + /* configure load pulse position adjustment */ + value = tegra_sor_readl(sor, 
sor->soc->regs->pll1); + value &= ~SOR_PLL1_LOADADJ_MASK; + + switch (rate) { + case DP_LINK_BW_1_62: + value |= SOR_PLL1_LOADADJ(0x3); + break; + + case DP_LINK_BW_2_7: + value |= SOR_PLL1_LOADADJ(0x4); + break; + + case DP_LINK_BW_5_4: + value |= SOR_PLL1_LOADADJ(0x6); + break; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - pattern = DP_TRAINING_PATTERN_DISABLE; + /* use alternate scrambler reset for eDP */ + value = tegra_sor_readl(sor, SOR_DP_SPARE0); - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) + if (link->edp == 0) + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + else + value |= SOR_DP_SPARE_PANEL_INTERNAL; + + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + err = tegra_sor_power_down_lanes(sor); + if (err < 0) { + dev_err(sor->dev, "failed to power down lanes: %d\n", err); return err; + } + + /* power up and pre-charge lanes */ + err = tegra_sor_power_up_lanes(sor, lanes); + if (err < 0) { + dev_err(sor->dev, "failed to power up %u lane%s: %d\n", + lanes, (lanes != 1) ? "s" : "", err); + return err; + } + + tegra_sor_dp_precharge(sor, lanes); return 0; } +static const struct drm_dp_link_ops tegra_sor_dp_link_ops = { + .apply_training = tegra_sor_dp_link_apply_training, + .configure = tegra_sor_dp_link_configure, +}; + static void tegra_sor_super_update(struct tegra_sor *sor) { tegra_sor_writel(sor, 0, SOR_SUPER_STATE0); @@ -913,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, u32 num_syms_per_line; unsigned int i; - if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel) + if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel) return -EINVAL; - output = link_rate * 8 * link->num_lanes; input = pclk * config->bits_per_pixel; + output = link_rate * 8 * link->lanes; if (input >= output) return -ERANGE; @@ -960,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, watermark = div_u64(watermark + params.error, f); config->watermark = watermark + (config->bits_per_pixel / 8) + 2; num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) * - (link->num_lanes * 8); + (link->lanes * 8); if (config->watermark > 30) { config->watermark = 30; @@ -977,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, num = ((mode->htotal - mode->hdisplay) - 7) * link_rate; config->hblank_symbols = div_u64(num, pclk); - if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (link->caps.enhanced_framing) config->hblank_symbols -= 3; - config->hblank_symbols -= 12 / link->num_lanes; + config->hblank_symbols -= 12 / link->lanes; /* compute the number of symbols per vertical blanking interval */ num = (mode->hdisplay - 25) * link_rate; config->vblank_symbols = div_u64(num, pclk); - config->vblank_symbols -= 36 / link->num_lanes + 4; + config->vblank_symbols -= 36 / link->lanes + 4; dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols, config->vblank_symbols); @@ -1201,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor) return err; } - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | - SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - /* stop lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | - SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; - tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); - - timeout = jiffies + msecs_to_jiffies(250); - - while 
(time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) - return -ETIMEDOUT; - value = tegra_sor_readl(sor, sor->soc->regs->pll2); value |= SOR_PLL2_PORT_POWERDOWN; tegra_sor_writel(sor, value, sor->soc->regs->pll2); @@ -1585,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_sor_edp_disable(struct drm_encoder *encoder) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_sor *sor = to_sor(output); - u32 value; - int err; - - if (output->panel) - drm_panel_disable(output->panel); - - err = tegra_sor_detach(sor); - if (err < 0) - dev_err(sor->dev, "failed to detach SOR: %d\n", err); - - tegra_sor_writel(sor, 0, SOR_STATE1); - tegra_sor_update(sor); - - /* - * The following accesses registers of the display controller, so make - * sure it's only executed when the output is attached to one. - */ - if (dc) { - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value &= ~SOR_ENABLE(0); - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - } - - err = tegra_sor_power_down(sor); - if (err < 0) - dev_err(sor->dev, "failed to power down SOR: %d\n", err); - - if (sor->aux) { - err = drm_dp_aux_disable(sor->aux); - if (err < 0) - dev_err(sor->dev, "failed to disable DP: %d\n", err); - } - - err = tegra_io_pad_power_disable(sor->pad); - if (err < 0) - dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); - - if (output->panel) - drm_panel_unprepare(output->panel); - - pm_runtime_put(sor->dev); -} - -#if 0 -static int calc_h_ref_to_sync(const struct drm_display_mode *mode, - unsigned int *value) -{ - unsigned int hfp, hsw, hbp, a = 0, b; - - hfp = mode->hsync_start - mode->hdisplay; - hsw = mode->hsync_end - mode->hsync_start; - hbp = mode->htotal - mode->hsync_end; - - pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp); - - b = hfp - 1; - - pr_info("a: %u, b: %u\n", a, b); - pr_info("a + hsw + hbp = %u\n", a + hsw + hbp); - - if (a + hsw + hbp <= 11) { - a = 1 + 11 - hsw - hbp; - pr_info("a: %u\n", a); - } - - if (a > b) - return -EINVAL; - - if (hsw < 1) - return -EINVAL; - - if (mode->hdisplay < 16) - return -EINVAL; - - if (value) { - if (b > a && a % 2) - *value = a + 1; - else - *value = a; - } - - return 0; -} -#endif - -static void tegra_sor_edp_enable(struct drm_encoder *encoder) -{ - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_sor *sor = to_sor(output); - struct tegra_sor_config config; - struct tegra_sor_state *state; - struct drm_dp_link link; - u8 rate, lanes; - unsigned int i; - int err = 0; - u32 value; - - state = to_sor_state(output->connector.state); - - pm_runtime_get_sync(sor->dev); - - if (output->panel) - drm_panel_prepare(output->panel); - - err = drm_dp_aux_enable(sor->aux); - if (err < 0) - dev_err(sor->dev, "failed to enable DP: %d\n", err); - - err = drm_dp_link_probe(sor->aux, &link); - if (err < 0) { - dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - return; - } - - /* switch to safe parent clock */ - err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if (err < 0) - dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); - - 
memset(&config, 0, sizeof(config)); - config.bits_per_pixel = state->bpc * 3; - - err = tegra_sor_compute_config(sor, mode, &config, &link); - if (err < 0) - dev_err(sor->dev, "failed to compute configuration: %d\n", err); - - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; - value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - usleep_range(20, 100); - - value = tegra_sor_readl(sor, sor->soc->regs->pll3); - value |= SOR_PLL3_PLL_VDD_MODE_3V3; - tegra_sor_writel(sor, value, sor->soc->regs->pll3); - - value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST | - SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value |= SOR_PLL2_SEQ_PLLCAPPD; - value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; - value |= SOR_PLL2_LVDS_ENABLE; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM; - tegra_sor_writel(sor, value, sor->soc->regs->pll1); - - while (true) { - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0) - break; - - usleep_range(250, 1000); - } - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; - value &= ~SOR_PLL2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - /* - * power up - */ - - /* set safe link bandwidth (1.62 Gbps) */ - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - /* step 1 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN | - SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - value = tegra_sor_readl(sor, sor->soc->regs->pll0); - value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value &= ~SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - /* step 2 */ - err = tegra_io_pad_power_enable(sor->pad); - if (err < 0) - dev_err(sor->dev, "failed to power on I/O pad: %d\n", err); - - usleep_range(5, 100); - - /* step 3 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - usleep_range(20, 100); - - /* step 4 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll0); - value &= ~SOR_PLL0_VCOPD; - value &= ~SOR_PLL0_PWR; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - usleep_range(200, 1000); - - /* step 5 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - /* XXX not in TRM */ - for (value = 0, i = 0; i < 5; i++) - value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) | - SOR_XBAR_CTRL_LINK1_XSEL(i, i); - - tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); - tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - - /* switch to DP parent clock */ - err 
= tegra_sor_set_parent_clock(sor, sor->clk_dp); - if (err < 0) - dev_err(sor->dev, "failed to set parent clock: %d\n", err); - - /* power DP lanes */ - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - - if (link.num_lanes <= 2) - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2); - else - value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2; - - if (link.num_lanes <= 1) - value &= ~SOR_DP_PADCTL_PD_TXD_1; - else - value |= SOR_DP_PADCTL_PD_TXD_1; - - if (link.num_lanes == 0) - value &= ~SOR_DP_PADCTL_PD_TXD_0; - else - value |= SOR_DP_PADCTL_PD_TXD_0; - - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; - value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes); - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - /* start lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | - SOR_LANE_SEQ_CTL_POWER_STATE_UP; - tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); - - while (true) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(250, 1000); - } - - /* set link bandwidth */ - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= drm_dp_link_rate_to_bw_code(link.rate) << 2; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - tegra_sor_apply_config(sor, &config); - - /* enable link */ - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value |= SOR_DP_LINKCTL_ENABLE; - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - for (i = 0, value = 0; i < 4; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; - } - - tegra_sor_writel(sor, value, SOR_DP_TPG); - - /* enable pad calibration logic */ - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - err = drm_dp_link_probe(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - - err = drm_dp_link_power_up(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to power up eDP link: %d\n", err); - - err = drm_dp_link_configure(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to configure eDP link: %d\n", err); - - rate = drm_dp_link_rate_to_bw_code(link.rate); - lanes = link.num_lanes; - - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; - value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); - - if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - /* disable training pattern generator */ - - for (i = 0; i < link.num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; - } - - tegra_sor_writel(sor, value, SOR_DP_TPG); - - err = tegra_sor_dp_train_fast(sor, &link); - if (err < 0) - dev_err(sor->dev, "DP fast link training failed: %d\n", err); - - dev_dbg(sor->dev, "fast link training succeeded\n"); - - err = tegra_sor_power_up(sor, 250); - if (err < 0) - dev_err(sor->dev, 
"failed to power up SOR: %d\n", err); - - /* CSTM (LVDS, link A/B, upper) */ - value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | - SOR_CSTM_UPPER; - tegra_sor_writel(sor, value, SOR_CSTM); - - /* use DP-A protocol */ - value = tegra_sor_readl(sor, SOR_STATE1); - value &= ~SOR_STATE_ASY_PROTOCOL_MASK; - value |= SOR_STATE_ASY_PROTOCOL_DP_A; - tegra_sor_writel(sor, value, SOR_STATE1); - - tegra_sor_mode_set(sor, mode, state); - - /* PWM setup */ - err = tegra_sor_setup_pwm(sor, 250); - if (err < 0) - dev_err(sor->dev, "failed to setup PWM: %d\n", err); - - tegra_sor_update(sor); - - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value |= SOR_ENABLE(0); - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - - err = tegra_sor_attach(sor); - if (err < 0) - dev_err(sor->dev, "failed to attach SOR: %d\n", err); - - err = tegra_sor_wakeup(sor); - if (err < 0) - dev_err(sor->dev, "failed to enable DC: %d\n", err); - - if (output->panel) - drm_panel_enable(output->panel); -} - static int tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -2031,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, return 0; } -static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = { - .disable = tegra_sor_edp_disable, - .enable = tegra_sor_edp_enable, - .atomic_check = tegra_sor_encoder_atomic_check, -}; - static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size) { u32 value = 0; @@ -2161,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor) { u32 value; + /* + * Enable and unmask the HDA codec SCRATCH0 register interrupt. This + * is used for interoperability between the HDA codec driver and the + * HDMI/DP driver. 
+ */ + value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0; + tegra_sor_writel(sor, value, SOR_INT_ENABLE); + tegra_sor_writel(sor, value, SOR_INT_MASK); + tegra_sor_write_eld(sor); value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD; @@ -2170,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor) static void tegra_sor_audio_unprepare(struct tegra_sor *sor) { tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE); + tegra_sor_writel(sor, 0, SOR_INT_MASK); + tegra_sor_writel(sor, 0, SOR_INT_ENABLE); +} + +static void tegra_sor_audio_enable(struct tegra_sor *sor) +{ + u32 value; + + value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL); + + /* select HDA audio input */ + value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK); + value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA); + + /* inject null samples */ + if (sor->format.channels != 2) + value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL; + else + value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL; + + value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH; + + tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL); + + /* enable advertising HBR capability */ + tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE); } static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor) @@ -2207,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor) { u32 value; - value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL); - - /* select HDA audio input */ - value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK); - value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA); - - /* inject null samples */ - if (sor->format.channels != 2) - value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL; - else - value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL; - - value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH; - - tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL); - - /* enable advertising HBR capability */ - tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE); + tegra_sor_audio_enable(sor); tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL); @@ -2400,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); if (!sor->soc->has_nvdisplay) - value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1)); - else - value &= ~SOR_ENABLE(sor->index); + value &= ~SOR1_TIMING_CYA; + + value &= ~SOR_ENABLE(sor->index); tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); @@ -2560,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - /* switch to parent clock */ - err = clk_set_parent(sor->clk, sor->clk_parent); + /* + * Switch the pad clock to the DP clock. Note that we cannot actually + * do this because Tegra186 and later don't support clk_set_parent() + * on the sorX_pad_clkout clocks. We already do the equivalent above + * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register. 
+ */ +#if 0 + err = clk_set_parent(sor->clk_pad, sor->clk_dp); if (err < 0) { - dev_err(sor->dev, "failed to set parent clock: %d\n", err); + dev_err(sor->dev, "failed to select pad parent clock: %d\n", + err); return; } +#endif + /* switch the SOR clock to the pad clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_pad); if (err < 0) { - dev_err(sor->dev, "failed to set pad clock: %d\n", err); + dev_err(sor->dev, "failed to select SOR parent clock: %d\n", + err); + return; + } + + /* switch the output clock to the parent pixel clock */ + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select output parent clock: %d\n", + err); return; } @@ -2775,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); if (!sor->soc->has_nvdisplay) - value |= SOR_ENABLE(1) | SOR1_TIMING_CYA; - else - value |= SOR_ENABLE(sor->index); + value |= SOR1_TIMING_CYA; + + value |= SOR_ENABLE(sor->index); tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); @@ -2804,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = { .atomic_check = tegra_sor_encoder_atomic_check, }; +static void tegra_sor_dp_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + u32 value; + int err; + + if (output->panel) + drm_panel_disable(output->panel); + + /* + * Do not attempt to power down a DP link if we're not connected since + * the AUX transactions would just be timing out. + */ + if (output->connector.status != connector_status_disconnected) { + err = drm_dp_link_power_down(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to power down link: %d\n", + err); + } + + err = tegra_sor_detach(sor); + if (err < 0) + dev_err(sor->dev, "failed to detach SOR: %d\n", err); + + tegra_sor_writel(sor, 0, SOR_STATE1); + tegra_sor_update(sor); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~SOR_ENABLE(sor->index); + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + tegra_dc_commit(dc); + + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value &= ~SOR_STATE_ASY_SUBOWNER_MASK; + value &= ~SOR_STATE_ASY_OWNER_MASK; + tegra_sor_writel(sor, value, SOR_STATE1); + tegra_sor_update(sor); + + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe clock: %d\n", err); + + err = tegra_sor_power_down(sor); + if (err < 0) + dev_err(sor->dev, "failed to power down SOR: %d\n", err); + + err = tegra_io_pad_power_disable(sor->pad); + if (err < 0) + dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); + + err = drm_dp_aux_disable(sor->aux); + if (err < 0) + dev_err(sor->dev, "failed disable DPAUX: %d\n", err); + + if (output->panel) + drm_panel_unprepare(output->panel); + + pm_runtime_put(sor->dev); +} + +static void tegra_sor_dp_enable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + struct tegra_sor_config config; + struct tegra_sor_state *state; + struct drm_display_mode *mode; + struct drm_display_info *info; + unsigned int i; + u32 value; + int err; + + state = to_sor_state(output->connector.state); + mode = &encoder->crtc->state->adjusted_mode; + info = 
&output->connector.display_info; + + pm_runtime_get_sync(sor->dev); + + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + err = tegra_io_pad_power_enable(sor->pad); + if (err < 0) + dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err); + + usleep_range(20, 100); + + err = drm_dp_aux_enable(sor->aux); + if (err < 0) + dev_err(sor->dev, "failed to enable DPAUX: %d\n", err); + + err = drm_dp_link_probe(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to probe DP link: %d\n", err); + + tegra_sor_filter_rates(sor); + + err = drm_dp_link_choose(&sor->link, mode, info); + if (err < 0) + dev_err(sor->dev, "failed to choose link: %d\n", err); + + if (output->panel) + drm_panel_prepare(output->panel); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + usleep_range(20, 40); + + value = tegra_sor_readl(sor, sor->soc->regs->pll3); + value |= SOR_PLL3_PLL_VDD_MODE_3V3; + tegra_sor_writel(sor, value, sor->soc->regs->pll3); + + value = tegra_sor_readl(sor, sor->soc->regs->pll0); + value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR); + tegra_sor_writel(sor, value, sor->soc->regs->pll0); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + value |= SOR_PLL2_SEQ_PLLCAPPD; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; + value &= ~SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; + + if (output->panel) + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; + else + value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK; + + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, SOR_DP_SPARE0); + /* XXX not in TRM */ + if (output->panel) + value |= SOR_DP_SPARE_PANEL_INTERNAL; + else + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + + value |= SOR_DP_SPARE_SEQ_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + /* XXX not in TRM */ + tegra_sor_writel(sor, 0, SOR_LVDS); + + value = tegra_sor_readl(sor, sor->soc->regs->pll0); + value &= ~SOR_PLL0_ICHPMP_MASK; + value &= ~SOR_PLL0_VCOCAP_MASK; + value |= SOR_PLL0_ICHPMP(0x1); + value |= SOR_PLL0_VCOCAP(0x3); + value |= SOR_PLL0_RESISTOR_EXT; + tegra_sor_writel(sor, value, sor->soc->regs->pll0); + + /* XXX not in TRM */ + for (value = 0, i = 0; i < 5; i++) + value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) | + SOR_XBAR_CTRL_LINK1_XSEL(i, i); + + tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); + tegra_sor_writel(sor, value, SOR_XBAR_CTRL); + + /* + * Switch the pad clock to the DP clock. Note that we cannot actually + * do this because Tegra186 and later don't support clk_set_parent() + * on the sorX_pad_clkout clocks. We already do the equivalent above + * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register. 
+ */ +#if 0 + err = clk_set_parent(sor->clk_pad, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select pad parent clock: %d\n", + err); + return; + } +#endif + + /* switch the SOR clock to the pad clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_pad); + if (err < 0) { + dev_err(sor->dev, "failed to select SOR parent clock: %d\n", + err); + return; + } + + /* switch the output clock to the parent pixel clock */ + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select output parent clock: %d\n", + err); + return; + } + + /* use DP-A protocol */ + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value |= SOR_STATE_ASY_PROTOCOL_DP_A; + tegra_sor_writel(sor, value, SOR_STATE1); + + /* enable port */ + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value |= SOR_DP_LINKCTL_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + tegra_sor_dp_term_calibrate(sor); + + err = drm_dp_link_train(&sor->link); + if (err < 0) + dev_err(sor->dev, "link training failed: %d\n", err); + else + dev_dbg(sor->dev, "link training succeeded\n"); + + err = drm_dp_link_power_up(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to power up DP link: %d\n", err); + + /* compute configuration */ + memset(&config, 0, sizeof(config)); + config.bits_per_pixel = state->bpc * 3; + + err = tegra_sor_compute_config(sor, mode, &config, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to compute configuration: %d\n", err); + + tegra_sor_apply_config(sor, &config); + tegra_sor_mode_set(sor, mode, state); + + if (output->panel) { + /* CSTM (LVDS, link A/B, upper) */ + value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | + SOR_CSTM_UPPER; + tegra_sor_writel(sor, value, SOR_CSTM); + + /* PWM setup */ + err = tegra_sor_setup_pwm(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to setup PWM: %d\n", err); + } + + tegra_sor_update(sor); + + err = tegra_sor_power_up(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to power up SOR: %d\n", err); + + /* attach and wake up */ + err = tegra_sor_attach(sor); + if (err < 0) + dev_err(sor->dev, "failed to attach SOR: %d\n", err); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= SOR_ENABLE(sor->index); + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + err = tegra_sor_wakeup(sor); + if (err < 0) + dev_err(sor->dev, "failed to wakeup SOR: %d\n", err); + + if (output->panel) + drm_panel_enable(output->panel); +} + +static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = { + .disable = tegra_sor_dp_disable, + .enable = tegra_sor_dp_enable, + .atomic_check = tegra_sor_encoder_atomic_check, +}; + +static int tegra_sor_hdmi_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); + if (IS_ERR(sor->avdd_io_supply)) { + dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", + PTR_ERR(sor->avdd_io_supply)); + return PTR_ERR(sor->avdd_io_supply); + } + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", + err); + return err; + } + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); + if (IS_ERR(sor->vdd_pll_supply)) { + dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", + PTR_ERR(sor->vdd_pll_supply)); + return PTR_ERR(sor->vdd_pll_supply); + } + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) { + dev_err(sor->dev, "failed 
to enable VDD PLL supply: %d\n", + err); + return err; + } + + sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); + if (IS_ERR(sor->hdmi_supply)) { + dev_err(sor->dev, "cannot get HDMI supply: %ld\n", + PTR_ERR(sor->hdmi_supply)); + return PTR_ERR(sor->hdmi_supply); + } + + err = regulator_enable(sor->hdmi_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); + return err; + } + + INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work); + + return 0; +} + +static int tegra_sor_hdmi_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->hdmi_supply); + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_hdmi_ops = { + .name = "HDMI", + .probe = tegra_sor_hdmi_probe, + .remove = tegra_sor_hdmi_remove, + .audio_enable = tegra_sor_hdmi_audio_enable, + .audio_disable = tegra_sor_hdmi_audio_disable, +}; + +static int tegra_sor_dp_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp"); + if (IS_ERR(sor->avdd_io_supply)) + return PTR_ERR(sor->avdd_io_supply); + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) + return err; + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll"); + if (IS_ERR(sor->vdd_pll_supply)) + return PTR_ERR(sor->vdd_pll_supply); + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) + return err; + + return 0; +} + +static int tegra_sor_dp_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_dp_ops = { + .name = "DP", + .probe = tegra_sor_dp_probe, + .remove = tegra_sor_dp_remove, +}; + static int tegra_sor_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); @@ -2811,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int connector = DRM_MODE_CONNECTOR_Unknown; int encoder = DRM_MODE_ENCODER_NONE; - u32 value; int err; if (!sor->aux) { - if (sor->soc->supports_hdmi) { + if (sor->ops == &tegra_sor_hdmi_ops) { connector = DRM_MODE_CONNECTOR_HDMIA; encoder = DRM_MODE_ENCODER_TMDS; helpers = &tegra_sor_hdmi_helpers; @@ -2824,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client) encoder = DRM_MODE_ENCODER_LVDS; } } else { - if (sor->soc->supports_edp) { + if (sor->output.panel) { connector = DRM_MODE_CONNECTOR_eDP; encoder = DRM_MODE_ENCODER_TMDS; - helpers = &tegra_sor_edp_helpers; - } else if (sor->soc->supports_dp) { + helpers = &tegra_sor_dp_helpers; + } else { connector = DRM_MODE_CONNECTOR_DisplayPort; encoder = DRM_MODE_ENCODER_TMDS; + helpers = &tegra_sor_dp_helpers; } + + sor->link.ops = &tegra_sor_dp_link_ops; + sor->link.aux = sor->aux; } sor->output.dev = sor->dev; @@ -2914,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) return err; - /* - * Enable and unmask the HDA codec SCRATCH0 register interrupt. This - * is used for interoperability between the HDA codec driver and the - * HDMI/DP driver. 
- */ - value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0; - tegra_sor_writel(sor, value, SOR_INT_ENABLE); - tegra_sor_writel(sor, value, SOR_INT_MASK); - return 0; } @@ -2931,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int err; - tegra_sor_writel(sor, 0, SOR_INT_MASK); - tegra_sor_writel(sor, 0, SOR_INT_ENABLE); - tegra_output_exit(&sor->output); if (sor->aux) { @@ -2956,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = { .exit = tegra_sor_exit, }; -static const struct tegra_sor_ops tegra_sor_edp_ops = { - .name = "eDP", -}; - -static int tegra_sor_hdmi_probe(struct tegra_sor *sor) -{ - int err; - - sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); - if (IS_ERR(sor->avdd_io_supply)) { - dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", - PTR_ERR(sor->avdd_io_supply)); - return PTR_ERR(sor->avdd_io_supply); - } - - err = regulator_enable(sor->avdd_io_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", - err); - return err; - } - - sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); - if (IS_ERR(sor->vdd_pll_supply)) { - dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", - PTR_ERR(sor->vdd_pll_supply)); - return PTR_ERR(sor->vdd_pll_supply); - } - - err = regulator_enable(sor->vdd_pll_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", - err); - return err; - } - - sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); - if (IS_ERR(sor->hdmi_supply)) { - dev_err(sor->dev, "cannot get HDMI supply: %ld\n", - PTR_ERR(sor->hdmi_supply)); - return PTR_ERR(sor->hdmi_supply); - } - - err = regulator_enable(sor->hdmi_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); - return err; - } - - INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work); - - return 0; -} - -static int tegra_sor_hdmi_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->hdmi_supply); - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - -static const struct tegra_sor_ops tegra_sor_hdmi_ops = { - .name = "HDMI", - .probe = tegra_sor_hdmi_probe, - .remove = tegra_sor_hdmi_remove, -}; - static const u8 tegra124_sor_xbar_cfg[5] = { 0, 1, 2, 3, 4 }; @@ -3044,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = { .dp_padctl2 = 0x73, }; +/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. 
*/ +static const u8 tegra124_sor_lane_map[4] = { + 2, 1, 0, 3, +}; + +static const u8 tegra124_sor_voltage_swing[4][4][4] = { + { + { 0x13, 0x19, 0x1e, 0x28 }, + { 0x1e, 0x25, 0x2d, }, + { 0x28, 0x32, }, + { 0x3c, }, + }, { + { 0x12, 0x17, 0x1b, 0x25 }, + { 0x1c, 0x23, 0x2a, }, + { 0x25, 0x2f, }, + { 0x39, } + }, { + { 0x12, 0x16, 0x1a, 0x22 }, + { 0x1b, 0x20, 0x27, }, + { 0x24, 0x2d, }, + { 0x36, }, + }, { + { 0x11, 0x14, 0x17, 0x1f }, + { 0x19, 0x1e, 0x24, }, + { 0x22, 0x2a, }, + { 0x32, }, + }, +}; + +static const u8 tegra124_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x09, 0x13, 0x25 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00 }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, +}; + +static const u8 tegra124_sor_post_cursor[4][4][4] = { + { + { 0x00, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, }, + { 0x00, 0x00, }, + { 0x00, }, + }, { + { 0x02, 0x02, 0x04, 0x05 }, + { 0x02, 0x04, 0x05, }, + { 0x04, 0x05, }, + { 0x05, }, + }, { + { 0x04, 0x05, 0x08, 0x0b }, + { 0x05, 0x09, 0x0b, }, + { 0x08, 0x0a, }, + { 0x0b, }, + }, { + { 0x05, 0x09, 0x0b, 0x12 }, + { 0x09, 0x0d, 0x12, }, + { 0x0b, 0x0f, }, + { 0x12, }, + }, +}; + +static const u8 tegra124_sor_tx_pu[4][4][4] = { + { + { 0x20, 0x30, 0x40, 0x60 }, + { 0x30, 0x40, 0x60, }, + { 0x40, 0x60, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x30, 0x50 }, + { 0x30, 0x40, 0x50, }, + { 0x40, 0x50, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x30, 0x40, }, + { 0x30, 0x30, 0x40, }, + { 0x40, 0x50, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x20, 0x40, }, + { 0x30, 0x30, 0x40, }, + { 0x40, 0x40, }, + { 0x60, }, + }, +}; + static const struct tegra_sor_soc tegra124_sor = { - .supports_edp = true, .supports_lvds = true, .supports_hdmi = false, - .supports_dp = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, + .regs = &tegra124_sor_regs, + .has_nvdisplay = false, + .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, +}; + +static const u8 tegra132_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x01, 0x0e, 0x1d, }, + { 0x01, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00 }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, +}; + +static const struct tegra_sor_soc tegra132_sor = { + .supports_lvds = true, + .supports_hdmi = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, .regs = &tegra124_sor_regs, .has_nvdisplay = false, .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra132_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra210_sor_regs = { @@ -3069,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = { .dp_padctl2 = 0x73, }; +static const u8 tegra210_sor_xbar_cfg[5] = { + 2, 1, 0, 3, 4 +}; + +static const u8 tegra210_sor_lane_map[4] = { + 0, 1, 2, 3, +}; + 
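The per-SoC drive-parameter tables above are indexed as [post-cursor][voltage-swing][pre-emphasis], matching the lookups in tegra_sor_dp_link_apply_training() earlier in this patch, and lane_map translates a logical DP lane number into the physical pad it drives (and therefore the byte position within the SOR_LANE_* registers). The standalone C sketch below illustrates that packing; the table contents and the requested training levels are made-up stand-ins for illustration, not calibration data from the TRM.

	/*
	 * Minimal userspace model of the per-lane packing done by
	 * tegra_sor_dp_link_apply_training(). Table values and training
	 * levels are illustrative placeholders only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* Tegra124/132 wiring: logical lanes 0 and 2 are swapped. */
	static const uint8_t lane_map[4] = { 2, 1, 0, 3 };

	/* indexed [post_cursor][voltage_swing][pre_emphasis]; dummy values */
	static const uint8_t voltage_swing[4][4][4] = {
		[0][0][0] = 0x13, [0][0][1] = 0x19, [0][0][2] = 0x1e,
		[0][1][0] = 0x1e, [0][1][1] = 0x25,
		[0][2][0] = 0x28,
	};

	int main(void)
	{
		/* per-lane levels as requested by the sink during training */
		const uint8_t vs[4] = { 1, 1, 0, 0 };
		const uint8_t pe[4] = { 1, 0, 2, 0 };
		const uint8_t pc[4] = { 0, 0, 0, 0 };
		uint32_t drive = 0;
		unsigned int i;

		for (i = 0; i < 4; i++) {
			/* each physical lane occupies one byte of the register */
			unsigned int shift = lane_map[i] << 3;

			drive |= (uint32_t)voltage_swing[pc[i]][vs[i]][pe[i]] << shift;
		}

		/* the packed word is what the driver writes to SOR_LANE_DRIVE_CURRENT0 */
		printf("SOR_LANE_DRIVE_CURRENT0 = 0x%08x\n", (unsigned int)drive);
		return 0;
	}

Note that SOR_DP_PADCTL_TX_PU is a single field shared by all lanes, which is why the driver tracks the maximum tx_pu value across the active lanes instead of packing it per lane.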
static const struct tegra_sor_soc tegra210_sor = { - .supports_edp = true, .supports_lvds = false, .supports_hdmi = false, - .supports_dp = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, + .regs = &tegra210_sor_regs, .has_nvdisplay = false, - .xbar_cfg = tegra124_sor_xbar_cfg, -}; -static const u8 tegra210_sor_xbar_cfg[5] = { - 2, 1, 0, 3, 4 + .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra210_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_soc tegra210_sor1 = { - .supports_edp = false, .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra210_sor_regs, .has_nvdisplay = false, .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults), .settings = tegra210_sor_hdmi_defaults, - .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra210_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra186_sor_regs = { @@ -3113,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = { .dp_padctl2 = 0x16a, }; -static const struct tegra_sor_soc tegra186_sor = { - .supports_edp = false, - .supports_lvds = false, - .supports_hdmi = false, - .supports_dp = true, - - .regs = &tegra186_sor_regs, - .has_nvdisplay = true, +static const u8 tegra186_sor_voltage_swing[4][4][4] = { + { + { 0x13, 0x19, 0x1e, 0x28 }, + { 0x1e, 0x25, 0x2d, }, + { 0x28, 0x32, }, + { 0x39, }, + }, { + { 0x12, 0x16, 0x1b, 0x25 }, + { 0x1c, 0x23, 0x2a, }, + { 0x25, 0x2f, }, + { 0x37, } + }, { + { 0x12, 0x16, 0x1a, 0x22 }, + { 0x1b, 0x20, 0x27, }, + { 0x24, 0x2d, }, + { 0x35, }, + }, { + { 0x11, 0x14, 0x17, 0x1f }, + { 0x19, 0x1e, 0x24, }, + { 0x22, 0x2a, }, + { 0x32, }, + }, +}; - .xbar_cfg = tegra124_sor_xbar_cfg, +static const u8 tegra186_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x01, 0x0e, 0x1d, }, + { 0x01, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00 }, + }, { + { 0x00, 0x08, 0x14, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, }; -static const struct tegra_sor_soc tegra186_sor1 = { - .supports_edp = false, +static const struct tegra_sor_soc tegra186_sor = { .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra186_sor_regs, .has_nvdisplay = true, .num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults), .settings = tegra186_sor_hdmi_defaults, - .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra186_sor_voltage_swing, + .pre_emphasis = tegra186_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra194_sor_regs = { @@ -3156,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = { }; static const struct tegra_sor_soc tegra194_sor = { - .supports_edp = true, .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra194_sor_regs, .has_nvdisplay = true, @@ -3168,14 +3543,19 @@ static const 
struct tegra_sor_soc tegra194_sor = { .settings = tegra194_sor_hdmi_defaults, .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra186_sor_voltage_swing, + .pre_emphasis = tegra186_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct of_device_id tegra_sor_of_match[] = { { .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor }, - { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 }, { .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor }, { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 }, { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor }, + { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor }, { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor }, { }, }; @@ -3201,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor) * earlier */ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index; + } else { + if (!sor->soc->supports_audio) + sor->index = 0; + else + sor->index = 1; } err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5); @@ -3235,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data) tegra_hda_parse_format(format, &sor->format); - tegra_sor_hdmi_audio_enable(sor); + if (sor->ops->audio_enable) + sor->ops->audio_enable(sor); } else { - tegra_sor_hdmi_audio_disable(sor); + if (sor->ops->audio_disable) + sor->ops->audio_disable(sor); } } @@ -3274,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->aux) return -EPROBE_DEFER; + + sor->output.ddc = &sor->aux->ddc; } if (!sor->aux) { @@ -3288,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev) return -ENODEV; } } else { - if (sor->soc->supports_edp) { - sor->ops = &tegra_sor_edp_ops; - sor->pad = TEGRA_IO_PAD_LVDS; - } else if (sor->soc->supports_dp) { - dev_err(&pdev->dev, "DisplayPort not supported yet\n"); - return -ENODEV; - } else { - dev_err(&pdev->dev, "unknown (DP) support\n"); - return -ENODEV; - } + np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0); + /* + * No need to keep this around since we only use it as a check + * to see if a panel is connected (eDP) or not (DP). + */ + of_node_put(np); + + sor->ops = &tegra_sor_dp_ops; + sor->pad = TEGRA_IO_PAD_LVDS; } err = tegra_sor_parse_dt(sor); @@ -3452,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev) * pad output clock. 
*/ if (!sor->clk_pad) { + char *name; + err = pm_runtime_get_sync(&pdev->dev); if (err < 0) { dev_err(&pdev->dev, "failed to get runtime PM: %d\n", @@ -3459,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } - sor->clk_pad = tegra_clk_sor_pad_register(sor, - "sor1_pad_clkout"); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index); + if (!name) { + err = -ENOMEM; + goto remove; + } + + sor->clk_pad = tegra_clk_sor_pad_register(sor, name); pm_runtime_put(&pdev->dev); } @@ -3517,8 +3912,7 @@ static int tegra_sor_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tegra_sor_suspend(struct device *dev) +static int tegra_sor_runtime_suspend(struct device *dev) { struct tegra_sor *sor = dev_get_drvdata(dev); int err; @@ -3540,7 +3934,7 @@ static int tegra_sor_suspend(struct device *dev) return 0; } -static int tegra_sor_resume(struct device *dev) +static int tegra_sor_runtime_resume(struct device *dev) { struct tegra_sor *sor = dev_get_drvdata(dev); int err; @@ -3572,10 +3966,39 @@ static int tegra_sor_resume(struct device *dev) return 0; } -#endif + +static int tegra_sor_suspend(struct device *dev) +{ + struct tegra_sor *sor = dev_get_drvdata(dev); + int err; + + if (sor->hdmi_supply) { + err = regulator_disable(sor->hdmi_supply); + if (err < 0) + return err; + } + + return 0; +} + +static int tegra_sor_resume(struct device *dev) +{ + struct tegra_sor *sor = dev_get_drvdata(dev); + int err; + + if (sor->hdmi_supply) { + err = regulator_enable(sor->hdmi_supply); + if (err < 0) + return err; + } + + return 0; +} static const struct dev_pm_ops tegra_sor_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL) + SET_RUNTIME_PM_OPS(tegra_sor_runtime_suspend, tegra_sor_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume) }; struct platform_driver tegra_sor_driver = { diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h index f8efd8be4b7c..00e09d5dca30 100644 --- a/drivers/gpu/drm/tegra/sor.h +++ b/drivers/gpu/drm/tegra/sor.h @@ -39,6 +39,7 @@ #define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6) #define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6) #define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6) +#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4) #define SOR_STATE_ASY_OWNER_MASK 0xf #define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0) @@ -283,10 +284,12 @@ #define SOR_DP_PADCTL_CM_TXD_2 (1 << 6) #define SOR_DP_PADCTL_CM_TXD_1 (1 << 5) #define SOR_DP_PADCTL_CM_TXD_0 (1 << 4) +#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x))) #define SOR_DP_PADCTL_PD_TXD_3 (1 << 3) #define SOR_DP_PADCTL_PD_TXD_0 (1 << 2) #define SOR_DP_PADCTL_PD_TXD_1 (1 << 1) #define SOR_DP_PADCTL_PD_TXD_2 (1 << 0) +#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x))) #define SOR_DP_PADCTL1 0x5d diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index cd0399fd8c63..3526c2892ddb 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -34,7 +34,6 @@ struct vic { void __iomem *regs; struct tegra_drm_client client; struct host1x_channel *channel; - struct iommu_domain *domain; struct device *dev; struct clk *clk; struct reset_control *rst; @@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev) static int vic_boot(struct vic *vic) { +#ifdef CONFIG_IOMMU_API + struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); +#endif u32 fce_ucode_size, fce_bin_data_offset; void *hdr; int err = 0; @@ -105,15 +107,14 @@ static int vic_boot(struct vic 
*vic) return 0; #ifdef CONFIG_IOMMU_API - if (vic->config->supports_sid) { - struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); + if (vic->config->supports_sid && spec) { u32 value; value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW); vic_writel(vic, value, VIC_TFBIF_TRANSCFG); - if (spec && spec->num_ids > 0) { + if (spec->num_ids > 0) { value = spec->ids[0] & 0xffff; vic_writel(vic, value, VIC_THI_STREAMID0); @@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic) if (err < 0) return err; - hdr = vic->falcon.firmware.vaddr; + hdr = vic->falcon.firmware.virt; fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET); - hdr = vic->falcon.firmware.vaddr + + hdr = vic->falcon.firmware.virt + *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET); fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET); @@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic) falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE, fce_ucode_size); falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET, - (vic->falcon.firmware.paddr + fce_bin_data_offset) + (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8); err = falcon_wait_idle(&vic->falcon); @@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic) return 0; } -static void *vic_falcon_alloc(struct falcon *falcon, size_t size, - dma_addr_t *iova) -{ - struct tegra_drm *tegra = falcon->data; - - return tegra_drm_alloc(tegra, size, iova); -} - -static void vic_falcon_free(struct falcon *falcon, size_t size, - dma_addr_t iova, void *va) -{ - struct tegra_drm *tegra = falcon->data; - - return tegra_drm_free(tegra, size, va, iova); -} - -static const struct falcon_ops vic_falcon_ops = { - .alloc = vic_falcon_alloc, - .free = vic_falcon_free -}; - static int vic_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct iommu_group *group = iommu_group_get(client->dev); struct drm_device *dev = dev_get_drvdata(client->parent); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; - if (group && tegra->domain) { - err = iommu_attach_group(tegra->domain, group); - if (err < 0) { - dev_err(vic->dev, "failed to attach to domain: %d\n", - err); - return err; - } - - vic->domain = tegra->domain; + err = host1x_client_iommu_attach(client); + if (err < 0 && err != -ENODEV) { + dev_err(vic->dev, "failed to attach to domain: %d\n", err); + return err; } - vic->channel = host1x_channel_request(client->dev); + vic->channel = host1x_channel_request(client); if (!vic->channel) { err = -ENOMEM; goto detach; @@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client) if (err < 0) goto free_syncpt; + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent device. 
+ */ + client->dev->dma_parms = client->parent->dma_parms; + return 0; free_syncpt: @@ -221,8 +201,7 @@ free_syncpt: free_channel: host1x_channel_put(vic->channel); detach: - if (group && tegra->domain) - iommu_detach_group(tegra->domain, group); + host1x_client_iommu_detach(client); return err; } @@ -230,22 +209,32 @@ detach: static int vic_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct iommu_group *group = iommu_group_get(client->dev); struct drm_device *dev = dev_get_drvdata(client->parent); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + err = tegra_drm_unregister_client(tegra, drm); if (err < 0) return err; host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(vic->channel); - - if (vic->domain) { - iommu_detach_group(vic->domain, group); - vic->domain = NULL; + host1x_client_iommu_detach(client); + + if (client->group) { + dma_unmap_single(vic->dev, vic->falcon.firmware.phys, + vic->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, vic->falcon.firmware.size, + vic->falcon.firmware.virt, + vic->falcon.firmware.iova); + } else { + dma_free_coherent(vic->dev, vic->falcon.firmware.size, + vic->falcon.firmware.virt, + vic->falcon.firmware.iova); } return 0; @@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = { static int vic_load_firmware(struct vic *vic) { + struct host1x_client *client = &vic->client.base; + struct tegra_drm *tegra = vic->client.drm; + dma_addr_t iova; + size_t size; + void *virt; int err; - if (vic->falcon.data) + if (vic->falcon.firmware.virt) return 0; - vic->falcon.data = vic->client.drm; - err = falcon_read_firmware(&vic->falcon, vic->config->firmware); if (err < 0) - goto cleanup; + return err; + + size = vic->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL); + + err = dma_mapping_error(vic->dev, iova); + if (err < 0) + return err; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + } + + vic->falcon.firmware.virt = virt; + vic->falcon.firmware.iova = iova; err = falcon_load_firmware(&vic->falcon); if (err < 0) goto cleanup; + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. 
+ */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(vic->dev, phys); + if (err < 0) + goto cleanup; + + vic->falcon.firmware.phys = phys; + } + return 0; cleanup: - vic->falcon.data = NULL; + if (!client->group) + dma_free_coherent(vic->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + return err; } @@ -358,13 +386,14 @@ static const struct vic_config vic_t194_config = { .supports_sid = true, }; -static const struct of_device_id vic_match[] = { +static const struct of_device_id tegra_vic_of_match[] = { { .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config }, { .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config }, { .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config }, { .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_vic_of_match); static int vic_probe(struct platform_device *pdev) { @@ -374,6 +403,13 @@ static int vic_probe(struct platform_device *pdev) struct vic *vic; int err; + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL); if (!vic) return -ENOMEM; @@ -410,7 +446,6 @@ static int vic_probe(struct platform_device *pdev) vic->falcon.dev = dev; vic->falcon.regs = vic->regs; - vic->falcon.ops = &vic_falcon_ops; err = falcon_init(&vic->falcon); if (err < 0) @@ -482,7 +517,7 @@ static const struct dev_pm_ops vic_pm_ops = { struct platform_driver tegra_vic_driver = { .driver = { .name = "tegra-vic", - .of_match_table = vic_match, + .of_match_table = tegra_vic_of_match, .pm = &vic_pm_ops }, .probe = vic_probe, diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 01fc670ce7a2..caea2a099496 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,8 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \ - ttm_page_alloc_dma.o + ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o +ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7e7925fecd9e..5df596fb0280 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -899,7 +899,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *mem, + bool no_wait_gpu) { struct dma_fence *fence; int ret; @@ -908,19 +909,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); - if (fence) { - dma_resv_add_shared_fence(bo->base.resv, fence); + if (!fence) + return 0; - ret = dma_resv_reserve_shared(bo->base.resv, 1); - if (unlikely(ret)) { - dma_fence_put(fence); - return ret; - } + if (no_wait_gpu) + return -EBUSY; + + dma_resv_add_shared_fence(bo->base.resv, fence); - dma_fence_put(bo->moving); - bo->moving = fence; + ret = dma_resv_reserve_shared(bo->base.resv, 1); + if (unlikely(ret)) { + dma_fence_put(fence); + return ret; } + dma_fence_put(bo->moving); + bo->moving = fence; return 0; } @@ -951,7 +955,7 @@ static int 
ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; } while (1); - return ttm_bo_add_move_fence(bo, man, mem); + return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, @@ -1091,14 +1095,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (unlikely(ret)) goto error; - if (mem->mm_node) { - ret = ttm_bo_add_move_fence(bo, man, mem); - if (unlikely(ret)) { - (*man->func->put_node)(man, mem); - goto error; - } - return 0; + if (!mem->mm_node) + continue; + + ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); + if (unlikely(ret)) { + (*man->func->put_node)(man, mem); + if (ret == -EBUSY) + continue; + + goto error; } + return 0; } for (i = 0; i < placement->num_busy_placement; ++i) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 3e8c3de91ae4..eebb4c06c04d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -128,6 +128,12 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct vm_fault *vmf) { + /* + * Work around locking order reversal in fault / nopfn + * between mmap_sem and bo_reserve: Perform a trylock operation + * for reserve, and if it fails, retry the fault after waiting + * for the buffer to become unreserved. + */ if (unlikely(!dma_resv_trylock(bo->base.resv))) { if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index ff54e7609e8f..bf876faea592 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -33,7 +33,6 @@ * when freed). */ -#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) #define pr_fmt(fmt) "[TTM] " fmt #include <linux/dma-mapping.h> @@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); - -#endif diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 4c4b59ae2c81..549dde83408b 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -560,14 +560,17 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (args->bcl_start != args->bcl_end) { bin = kcalloc(1, sizeof(*bin), GFP_KERNEL); - if (!bin) + if (!bin) { + v3d_job_put(&render->base); return -ENOMEM; + } ret = v3d_job_init(v3d, file_priv, &bin->base, v3d_job_free, args->in_sync_bcl); if (ret) { - kfree(bin); v3d_job_put(&render->base); + kfree(bin); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index 6b28a326f8bb..15acdf2a7c0f 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -8,6 +8,7 @@ config DRM_VMWGFX select FB_CFB_IMAGEBLIT select DRM_TTM select FB + select MAPPING_DIRTY_HELPERS # Only needed for the transitional use of drm_crtc_init - can be removed # again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 8841bd30e1e5..c877a21a0739 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ - vmwgfx_validation.o \ + vmwgfx_validation.o vmwgfx_page_dirty.o \ ttm_object.o ttm_lock.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h index f2bfd3d80598..61414f105c67 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h @@ -1280,7 +1280,6 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, return offset; } - static inline u32 svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, surf_size_struct baseLevelSize, @@ -1375,4 +1374,236 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format) return svga3dsurface_is_dx_screen_target_format(format); } +/** + * struct svga3dsurface_mip - Mipmap level information + * @bytes: Bytes required in the backing store of this mipmap level. + * @img_stride: Byte stride per image. + * @row_stride: Byte stride per block row. + * @size: The size of the mipmap. + */ +struct svga3dsurface_mip { + size_t bytes; + size_t img_stride; + size_t row_stride; + struct drm_vmw_size size; +}; + +/** + * struct svga3dsurface_cache - Cached surface information + * @desc: Pointer to the surface descriptor + * @mip: Array of mipmap level information. Valid size is @num_mip_levels. + * @mip_chain_bytes: Bytes required in the backing store for the whole chain + * of mip levels. + * @sheet_bytes: Bytes required in the backing store for a sheet + * representing a single sample. + * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in + * a chain. + * @num_layers: Number of slices in an array texture or number of faces in + * a cubemap texture. + */ +struct svga3dsurface_cache { + const struct svga3d_surface_desc *desc; + struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS]; + size_t mip_chain_bytes; + size_t sheet_bytes; + u32 num_mip_levels; + u32 num_layers; +}; + +/** + * struct svga3dsurface_loc - Surface location + * @sub_resource: Surface subresource. Defined as layer * num_mip_levels + + * mip_level. + * @x: X coordinate. + * @y: Y coordinate. + * @z: Z coordinate. + */ +struct svga3dsurface_loc { + u32 sub_resource; + u32 x, y, z; +}; + +/** + * svga3dsurface_subres - Compute the subresource from layer and mipmap. + * @cache: Surface layout data. + * @mip_level: The mipmap level. + * @layer: The surface layer (face or array slice). + * + * Return: The subresource. + */ +static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache, + u32 mip_level, u32 layer) +{ + return cache->num_mip_levels * layer + mip_level; +} + +/** + * svga3dsurface_setup_cache - Build a surface cache entry + * @size: The surface base level dimensions. + * @format: The surface format. + * @num_mip_levels: Number of mipmap levels. + * @num_layers: Number of layers. + * @num_samples: Number of samples per pixel. + * @cache: Pointer to a struct svga3dsurface_cache object to be filled in. + * + * Return: Zero on success, -EINVAL on invalid surface layout.
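+ * + * Worked example (illustrative numbers only): an 8x8 surface with four + * mip levels, one layer, one sample and 4 bytes per 1x1 block gives + * per-level sizes of 256, 64, 16 and 4 bytes, so @mip_chain_bytes and + * @sheet_bytes both end up as 340.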
+ */ +static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size, + SVGA3dSurfaceFormat format, + u32 num_mip_levels, + u32 num_layers, + u32 num_samples, + struct svga3dsurface_cache *cache) +{ + const struct svga3d_surface_desc *desc; + u32 i; + + memset(cache, 0, sizeof(*cache)); + cache->desc = desc = svga3dsurface_get_desc(format); + cache->num_mip_levels = num_mip_levels; + cache->num_layers = num_layers; + for (i = 0; i < cache->num_mip_levels; i++) { + struct svga3dsurface_mip *mip = &cache->mip[i]; + + mip->size = svga3dsurface_get_mip_size(*size, i); + mip->bytes = svga3dsurface_get_image_buffer_size + (desc, &mip->size, 0); + mip->row_stride = + __KERNEL_DIV_ROUND_UP(mip->size.width, + desc->block_size.width) * + desc->bytes_per_block * num_samples; + if (!mip->row_stride) + goto invalid_dim; + + mip->img_stride = + __KERNEL_DIV_ROUND_UP(mip->size.height, + desc->block_size.height) * + mip->row_stride; + if (!mip->img_stride) + goto invalid_dim; + + cache->mip_chain_bytes += mip->bytes; + } + cache->sheet_bytes = cache->mip_chain_bytes * num_layers; + if (!cache->sheet_bytes) + goto invalid_dim; + + return 0; + +invalid_dim: + VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n"); + return -EINVAL; +} + +/** + * svga3dsurface_get_loc - Get a surface location from an offset into the + * backing store + * @cache: Surface layout data. + * @loc: Pointer to a struct svga3dsurface_loc to be filled in. + * @offset: Offset into the surface backing store. + */ +static inline void +svga3dsurface_get_loc(const struct svga3dsurface_cache *cache, + struct svga3dsurface_loc *loc, + size_t offset) +{ + const struct svga3dsurface_mip *mip = &cache->mip[0]; + const struct svga3d_surface_desc *desc = cache->desc; + u32 layer; + int i; + + if (offset >= cache->sheet_bytes) + offset %= cache->sheet_bytes; + + layer = offset / cache->mip_chain_bytes; + offset -= layer * cache->mip_chain_bytes; + for (i = 0; i < cache->num_mip_levels; ++i, ++mip) { + if (mip->bytes > offset) + break; + offset -= mip->bytes; + } + + loc->sub_resource = svga3dsurface_subres(cache, i, layer); + loc->z = offset / mip->img_stride; + offset -= loc->z * mip->img_stride; + loc->z *= desc->block_size.depth; + loc->y = offset / mip->row_stride; + offset -= loc->y * mip->row_stride; + loc->y *= desc->block_size.height; + loc->x = offset / desc->bytes_per_block; + loc->x *= desc->block_size.width; +} + +/** + * svga3dsurface_inc_loc - Clamp increment a surface location with one block + * size + * in each dimension. + * @loc: Pointer to a struct svga3dsurface_loc to be incremented. + * + * When computing the size of a range as size = end - start, the range does not + * include the end element. However a location representing the last byte + * of a touched region in the backing store *is* included in the range. + * This function modifies such a location to match the end definition + * given as start + size which is the one used in a SVGA3dBox. 
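+ * + * For example (illustrative): with a 4x4 block size and a mip width of 8, + * a last-byte location of x == 5 advances to 9 and is then clamped to the + * width, yielding the exclusive end coordinate 8.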
+ */ +static inline void +svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache, + struct svga3dsurface_loc *loc) +{ + const struct svga3d_surface_desc *desc = cache->desc; + u32 mip = loc->sub_resource % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + + loc->sub_resource++; + loc->x += desc->block_size.width; + if (loc->x > size->width) + loc->x = size->width; + loc->y += desc->block_size.height; + if (loc->y > size->height) + loc->y = size->height; + loc->z += desc->block_size.depth; + if (loc->z > size->depth) + loc->z = size->depth; +} + +/** + * svga3dsurface_min_loc - The start location in a subresource + * @cache: Surface layout data. + * @sub_resource: The subresource. + * @loc: Pointer to a struct svga3dsurface_loc to be filled in. + */ +static inline void +svga3dsurface_min_loc(const struct svga3dsurface_cache *cache, + u32 sub_resource, + struct svga3dsurface_loc *loc) +{ + loc->sub_resource = sub_resource; + loc->x = loc->y = loc->z = 0; +} + +/** + * svga3dsurface_max_loc - The end location in a subresource + * @cache: Surface layout data. + * @sub_resource: The subresource. + * @loc: Pointer to a struct svga3dsurface_loc to be filled in. + * + * Following the end definition given in svga3dsurface_inc_loc(), + * compute the end location of a surface subresource. + */ +static inline void +svga3dsurface_max_loc(const struct svga3dsurface_cache *cache, + u32 sub_resource, + struct svga3dsurface_loc *loc) +{ + const struct drm_vmw_size *size; + u32 mip; + + loc->sub_resource = sub_resource + 1; + mip = sub_resource % cache->num_mip_levels; + size = &cache->mip[mip].size; + loc->x = size->width; + loc->y = size->height; + loc->z = size->depth; +} + #endif /* _SVGA3D_SURFACEDEFS_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 74016a08d118..8b71bf6b58ef 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -462,6 +462,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) { struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); + WARN_ON(vmw_bo->dirty); + WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree)); vmw_bo_unmap(vmw_bo); kfree(vmw_bo); } @@ -475,8 +477,11 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) { struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo); + struct vmw_buffer_object *vbo = &vmw_user_bo->vbo; - vmw_bo_unmap(&vmw_user_bo->vbo); + WARN_ON(vbo->dirty); + WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); + vmw_bo_unmap(vbo); ttm_prime_object_kfree(vmw_user_bo, prime); } @@ -511,8 +516,7 @@ int vmw_bo_init(struct vmw_private *dev_priv, memset(vmw_bo, 0, sizeof(*vmw_bo)); BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); vmw_bo->base.priority = 3; - - INIT_LIST_HEAD(&vmw_bo->res_list); + vmw_bo->res_tree = RB_ROOT; ret = ttm_bo_init(bdev, &vmw_bo->base, size, ttm_bo_type_device, placement, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 81a95651643f..e962048f65d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -576,8 +576,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) else dev_priv->map_mode = vmw_dma_map_populate; - /* No TTM coherent page pool? FIXME: Ask TTM instead!
*/ - if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && + if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) && (dev_priv->map_mode == vmw_dma_alloc_coherent)) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index b18842f73081..a31e726d6d71 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -56,9 +56,9 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20180704" +#define VMWGFX_DRIVER_DATE "20190328" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 15 +#define VMWGFX_DRIVER_MINOR 16 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -100,17 +100,18 @@ struct vmw_fpriv { /** * struct vmw_buffer_object - TTM buffer object with vmwgfx additions * @base: The TTM buffer object - * @res_list: List of resources using this buffer object as a backing MOB + * @res_tree: RB tree of resources using this buffer object as a backing MOB * @pin_count: pin depth * @cpu_writers: Number of synccpu write grabs. Protected by reservation when * increased. May be decreased without reservation. * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB * @map: Kmap object for semi-persistent mappings * @res_prios: Eviction priority counts for attached resources + * @dirty: structure for user-space dirty-tracking */ struct vmw_buffer_object { struct ttm_buffer_object base; - struct list_head res_list; + struct rb_root res_tree; s32 pin_count; atomic_t cpu_writers; /* Not ref-counted. Protected by binding_mutex */ @@ -118,6 +119,7 @@ struct vmw_buffer_object { /* Protected by reservation */ struct ttm_bo_kmap_obj map; u32 res_prios[TTM_MAX_BO_PRIORITY]; + struct vmw_bo_dirty *dirty; }; /** @@ -148,7 +150,8 @@ struct vmw_res_func; * @res_dirty: Resource contains data not yet in the backup buffer. Protected * by resource reserved. * @backup_dirty: Backup buffer contains data not yet in the HW resource. - * Protecte by resource reserved. + * Protected by resource reserved. + * @coherent: Emulate coherency by tracking vm accesses. * @backup: The backup buffer if any. Protected by resource reserved. * @backup_offset: Offset into the backup buffer if any. Protected by resource * reserved. Note that only a few resource types can have a @backup_offset @@ -157,29 +160,32 @@ struct vmw_res_func; * pin-count greater than zero. It is not on the resource LRU lists and its * backup buffer is pinned. Hence it can't be evicted. * @func: Method vtable for this resource. Immutable. + * @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved. * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. - * @mob_head: List head for the MOB backup list. Protected by @backup reserved. * @binding_head: List head for the context binding list. Protected by * the @dev_priv::binding_mutex * @res_free: The resource destructor. * @hw_destroy: Callback to destroy the resource on the device, as part of * resource destruction. 
*/ +struct vmw_resource_dirty; struct vmw_resource { struct kref kref; struct vmw_private *dev_priv; int id; u32 used_prio; unsigned long backup_size; - bool res_dirty; - bool backup_dirty; + u32 res_dirty : 1; + u32 backup_dirty : 1; + u32 coherent : 1; struct vmw_buffer_object *backup; unsigned long backup_offset; unsigned long pin_count; const struct vmw_res_func *func; + struct rb_node mob_node; struct list_head lru_head; - struct list_head mob_head; struct list_head binding_head; + struct vmw_resource_dirty *dirty; void (*res_free) (struct vmw_resource *res); void (*hw_destroy) (struct vmw_resource *res); }; @@ -678,7 +684,8 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); extern struct vmw_resource * vmw_resource_reference_unless_doomed(struct vmw_resource *res); -extern int vmw_resource_validate(struct vmw_resource *res, bool intr); +extern int vmw_resource_validate(struct vmw_resource *res, bool intr, + bool dirtying); extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, bool no_backup); extern bool vmw_resource_needs_backup(const struct vmw_resource *res); @@ -720,6 +727,10 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv); extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); void vmw_resource_mob_attach(struct vmw_resource *res); void vmw_resource_mob_detach(struct vmw_resource *res); +void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, + pgoff_t end); +int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, + pgoff_t end, pgoff_t *num_prefault); /** * vmw_resource_mob_attached - Whether a resource currently has a mob attached @@ -729,7 +740,7 @@ void vmw_resource_mob_detach(struct vmw_resource *res); */ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) { - return !list_empty(&res->mob_head); + return !RB_EMPTY_NODE(&res->mob_node); } /** @@ -1407,6 +1418,17 @@ int vmw_host_log(const char *log); #define VMW_DEBUG_USER(fmt, ...) 
\ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) +/* Resource dirtying - vmwgfx_page_dirty.c */ +void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo); +int vmw_bo_dirty_add(struct vmw_buffer_object *vbo); +void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res); +void vmw_bo_dirty_clear_res(struct vmw_resource *res); +void vmw_bo_dirty_release(struct vmw_buffer_object *vbo); +void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, + pgoff_t start, pgoff_t end); +vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); +vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); + /** * VMW_DEBUG_KMS - Debug output for kernel mode-setting * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index ff86d49dc5e8..934ad7c0c342 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2560,7 +2560,6 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, offsetof(typeof(*cmd), sid)); cmd = container_of(header, typeof(*cmd), header); - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, VMW_RES_DIRTY_NONE, user_surface_converter, &cmd->sid, NULL); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c new file mode 100644 index 000000000000..f07aa857587c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/************************************************************************** + * + * Copyright 2019 VMware, Inc., Palo Alto, CA., USA + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +#include "vmwgfx_drv.h" + +/* + * Different methods for tracking dirty: + * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits + * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write- + * accesses in the VM mkwrite() callback + */ +enum vmw_bo_dirty_method { + VMW_BO_DIRTY_PAGETABLE, + VMW_BO_DIRTY_MKWRITE, +}; + +/* + * No dirtied pages at scan trigger a transition to the _MKWRITE method, + * similarly a certain percentage of dirty pages trigger a transition to + * the _PAGETABLE method. How many triggers should we wait for before + * changing method? 
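+ * With VMW_DIRTY_NUM_CHANGE_TRIGGERS == 2, for example, it takes three + * consecutive triggering scans before the method is switched, since the + * change counter must exceed the trigger count.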
+ */ +#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2 + +/* Percentage to trigger a transition to the _PAGETABLE method */ +#define VMW_DIRTY_PERCENTAGE 10 + +/** + * struct vmw_bo_dirty - Dirty information for buffer objects + * @start: First currently dirty bit + * @end: Last currently dirty bit + 1 + * @method: The currently used dirty method + * @change_count: Number of consecutive method change triggers + * @ref_count: Reference count for this structure + * @bitmap_size: The size of the bitmap in bits. Typically equal to the + * number of pages in the bo. + * @size: The accounting size for this struct. + * @bitmap: A bitmap where each bit represents a page. A set bit means a + * dirty page. + */ +struct vmw_bo_dirty { + unsigned long start; + unsigned long end; + enum vmw_bo_dirty_method method; + unsigned int change_count; + unsigned int ref_count; + unsigned long bitmap_size; + size_t size; + unsigned long bitmap[0]; +}; + +/** + * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits + * @vbo: The buffer object to scan + * + * Scans the pagetable for dirty bits. Clears those bits and modifies the + * dirty structure with the results. This function may change the + * dirty-tracking method. + */ +static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); + struct address_space *mapping = vbo->base.bdev->dev_mapping; + pgoff_t num_marked; + + num_marked = clean_record_shared_mapping_range + (mapping, + offset, dirty->bitmap_size, + offset, &dirty->bitmap[0], + &dirty->start, &dirty->end); + if (num_marked == 0) + dirty->change_count++; + else + dirty->change_count = 0; + + if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { + dirty->change_count = 0; + dirty->method = VMW_BO_DIRTY_MKWRITE; + wp_shared_mapping_range(mapping, + offset, dirty->bitmap_size); + clean_record_shared_mapping_range(mapping, + offset, dirty->bitmap_size, + offset, &dirty->bitmap[0], + &dirty->start, &dirty->end); + } +} + +/** + * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method + * @vbo: The buffer object to scan + * + * Write-protect pages written to so that consecutive write accesses will + * trigger a call to mkwrite. + * + * This function may change the dirty-tracking method.
+ */ +static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); + struct address_space *mapping = vbo->base.bdev->dev_mapping; + pgoff_t num_marked; + + if (dirty->end <= dirty->start) + return; + + num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping, + dirty->start + offset, + dirty->end - dirty->start); + + if (100UL * num_marked / dirty->bitmap_size > + VMW_DIRTY_PERCENTAGE) { + dirty->change_count++; + } else { + dirty->change_count = 0; + } + + if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { + pgoff_t start = 0; + pgoff_t end = dirty->bitmap_size; + + dirty->method = VMW_BO_DIRTY_PAGETABLE; + clean_record_shared_mapping_range(mapping, offset, end, offset, + &dirty->bitmap[0], + &start, &end); + bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size); + if (dirty->start < dirty->end) + bitmap_set(&dirty->bitmap[0], dirty->start, + dirty->end - dirty->start); + dirty->change_count = 0; + } +} + +/** + * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty + * tracking structure + * @vbo: The buffer object to scan + * + * This function may change the dirty tracking method. + */ +void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + + if (dirty->method == VMW_BO_DIRTY_PAGETABLE) + vmw_bo_dirty_scan_pagetable(vbo); + else + vmw_bo_dirty_scan_mkwrite(vbo); +} + +/** + * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before + * an unmap_mapping_range operation. + * @vbo: The buffer object, + * @start: First page of the range within the buffer object. + * @end: Last page of the range within the buffer object + 1. + * + * If we're using the _PAGETABLE scan method, we may leak dirty pages + * when calling unmap_mapping_range(). This function makes sure we pick + * up all dirty pages. + */ +static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo, + pgoff_t start, pgoff_t end) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); + struct address_space *mapping = vbo->base.bdev->dev_mapping; + + if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end) + return; + + wp_shared_mapping_range(mapping, start + offset, end - start); + clean_record_shared_mapping_range(mapping, start + offset, + end - start, offset, + &dirty->bitmap[0], &dirty->start, + &dirty->end); +} + +/** + * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo + * @vbo: The buffer object, + * @start: First page of the range within the buffer object. + * @end: Last page of the range within the buffer object + 1. + * + * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange. + */ +void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, + pgoff_t start, pgoff_t end) +{ + unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); + struct address_space *mapping = vbo->base.bdev->dev_mapping; + + vmw_bo_dirty_pre_unmap(vbo, start, end); + unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT, + (loff_t) (end - start) << PAGE_SHIFT); +} + +/** + * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object + * @vbo: The buffer object + * + * This function registers a dirty-tracking user to a buffer object. + * A user can be for example a resource or a vma in a special user-space + * mapping. + * + * Return: Zero on success, -ENOMEM on memory allocation failure. 
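+ * + * Buffer objects with fewer pages than PAGE_SIZE / sizeof(pte_t) start + * out in the _PAGETABLE method; larger ones start in _MKWRITE and are + * write-protected up front so that already resident dirty pages are + * picked up immediately.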
+ */ +int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + pgoff_t num_pages = vbo->base.num_pages; + size_t size, acc_size; + int ret; + static struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; + + if (dirty) { + dirty->ref_count++; + return 0; + } + + size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); + acc_size = ttm_round_pot(size); + ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); + if (ret) { + VMW_DEBUG_USER("Out of graphics memory for buffer object " + "dirty tracker.\n"); + return ret; + } + dirty = kvzalloc(size, GFP_KERNEL); + if (!dirty) { + ret = -ENOMEM; + goto out_no_dirty; + } + + dirty->size = acc_size; + dirty->bitmap_size = num_pages; + dirty->start = dirty->bitmap_size; + dirty->end = 0; + dirty->ref_count = 1; + if (num_pages < PAGE_SIZE / sizeof(pte_t)) { + dirty->method = VMW_BO_DIRTY_PAGETABLE; + } else { + struct address_space *mapping = vbo->base.bdev->dev_mapping; + pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); + + dirty->method = VMW_BO_DIRTY_MKWRITE; + + /* Write-protect and then pick up already dirty bits */ + wp_shared_mapping_range(mapping, offset, num_pages); + clean_record_shared_mapping_range(mapping, offset, num_pages, + offset, + &dirty->bitmap[0], + &dirty->start, &dirty->end); + } + + vbo->dirty = dirty; + + return 0; + +out_no_dirty: + ttm_mem_global_free(&ttm_mem_glob, acc_size); + return ret; +} + +/** + * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object + * @vbo: The buffer object + * + * This function releases a dirty-tracking user from a buffer object. + * If the reference count reaches zero, then the dirty-tracking object is + * freed and the pointer to it cleared. + */ +void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + + if (dirty && --dirty->ref_count == 0) { + size_t acc_size = dirty->size; + + kvfree(dirty); + ttm_mem_global_free(&ttm_mem_glob, acc_size); + vbo->dirty = NULL; + } +} + +/** + * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from + * its backing mob. + * @res: The resource + * + * This function will pick up all dirty ranges affecting the resource from + * its backup mob, and call vmw_resource_dirty_update() once for each + * range. The transferred ranges will be cleared from the backing mob's + * dirty tracking.
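+ * Afterwards, @dirty->start and @dirty->end are trimmed so that a range + * fully consumed by the resource no longer counts as dirty in the mob.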
+ */ +void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) +{ + struct vmw_buffer_object *vbo = res->backup; + struct vmw_bo_dirty *dirty = vbo->dirty; + pgoff_t start, cur, end; + unsigned long res_start = res->backup_offset; + unsigned long res_end = res->backup_offset + res->backup_size; + + WARN_ON_ONCE(res_start & ~PAGE_MASK); + res_start >>= PAGE_SHIFT; + res_end = DIV_ROUND_UP(res_end, PAGE_SIZE); + + if (res_start >= dirty->end || res_end <= dirty->start) + return; + + cur = max(res_start, dirty->start); + res_end = min(res_end, dirty->end); + while (cur < res_end) { + unsigned long num; + + start = find_next_bit(&dirty->bitmap[0], res_end, cur); + if (start >= res_end) + break; + + end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); + cur = end + 1; + num = end - start; + bitmap_clear(&dirty->bitmap[0], start, num); + vmw_resource_dirty_update(res, start, end); + } + + if (res_start <= dirty->start && res_end > dirty->start) + dirty->start = res_end; + if (res_start < dirty->end && res_end >= dirty->end) + dirty->end = res_start; +} + +/** + * vmw_bo_dirty_clear_res - Clear a resource's dirty region from + * its backing mob. + * @res: The resource + * + * This function will clear all dirty ranges affecting the resource from + * its backup mob's dirty tracking. + */ +void vmw_bo_dirty_clear_res(struct vmw_resource *res) +{ + unsigned long res_start = res->backup_offset; + unsigned long res_end = res->backup_offset + res->backup_size; + struct vmw_buffer_object *vbo = res->backup; + struct vmw_bo_dirty *dirty = vbo->dirty; + + res_start >>= PAGE_SHIFT; + res_end = DIV_ROUND_UP(res_end, PAGE_SIZE); + + if (res_start >= dirty->end || res_end <= dirty->start) + return; + + res_start = max(res_start, dirty->start); + res_end = min(res_end, dirty->end); + bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start); + + if (res_start <= dirty->start && res_end > dirty->start) + dirty->start = res_end; + if (res_start < dirty->end && res_end >= dirty->end) + dirty->end = res_start; +} + +vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + vm_fault_t ret; + unsigned long page_offset; + unsigned int save_flags; + struct vmw_buffer_object *vbo = + container_of(bo, typeof(*vbo), base); + + /* + * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly. + * So make sure the TTM helpers are aware.
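+ * FAULT_FLAG_ALLOW_RETRY is masked out around the reserve call below + * and restored afterwards, so ttm_bo_vm_reserve() cannot return + * VM_FAULT_RETRY from this path.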
+ */ + save_flags = vmf->flags; + vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY; + ret = ttm_bo_vm_reserve(bo, vmf); + vmf->flags = save_flags; + if (ret) + return ret; + + page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); + if (unlikely(page_offset >= bo->num_pages)) { + ret = VM_FAULT_SIGBUS; + goto out_unlock; + } + + if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE && + !test_bit(page_offset, &vbo->dirty->bitmap[0])) { + struct vmw_bo_dirty *dirty = vbo->dirty; + + __set_bit(page_offset, &dirty->bitmap[0]); + dirty->start = min(dirty->start, page_offset); + dirty->end = max(dirty->end, page_offset + 1); + } + +out_unlock: + dma_resv_unlock(bo->base.resv); + return ret; +} + +vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct vmw_buffer_object *vbo = + container_of(bo, struct vmw_buffer_object, base); + pgoff_t num_prefault; + pgprot_t prot; + vm_fault_t ret; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 : + TTM_BO_VM_NUM_PREFAULT; + + if (vbo->dirty) { + pgoff_t allowed_prefault; + unsigned long page_offset; + + page_offset = vmf->pgoff - + drm_vma_node_start(&bo->base.vma_node); + if (page_offset >= bo->num_pages || + vmw_resources_clean(vbo, page_offset, + page_offset + PAGE_SIZE, + &allowed_prefault)) { + ret = VM_FAULT_SIGBUS; + goto out_unlock; + } + + num_prefault = min(num_prefault, allowed_prefault); + } + + /* + * If we don't track dirty using the MKWRITE method, make sure + * the page protection is write-enabled so we don't get + * a lot of unnecessary write faults. + */ + if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) + prot = vma->vm_page_prot; + else + prot = vm_get_page_prot(vma->vm_flags); + + ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +out_unlock: + dma_resv_unlock(bo->base.resv); + + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6dfe36fb817c..c8441030637a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -40,11 +40,24 @@ void vmw_resource_mob_attach(struct vmw_resource *res) { struct vmw_buffer_object *backup = res->backup; + struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL; dma_resv_assert_held(res->backup->base.base.resv); res->used_prio = (res->res_dirty) ? res->func->dirty_prio : res->func->prio; + + while (*new) { + struct vmw_resource *this = + container_of(*new, struct vmw_resource, mob_node); + + parent = *new; + new = (res->backup_offset < this->backup_offset) ?
+ &((*new)->rb_left) : &((*new)->rb_right); + } + + rb_link_node(&res->mob_node, parent, new); + rb_insert_color(&res->mob_node, &backup->res_tree); + vmw_bo_prio_add(backup, res->used_prio); } @@ -58,7 +71,8 @@ void vmw_resource_mob_detach(struct vmw_resource *res) dma_resv_assert_held(backup->base.base.resv); if (vmw_resource_mob_attached(res)) { - list_del_init(&res->mob_head); + rb_erase(&res->mob_node, &backup->res_tree); + RB_CLEAR_NODE(&res->mob_node); vmw_bo_prio_del(backup, res->used_prio); } } @@ -119,6 +133,10 @@ static void vmw_resource_release(struct kref *kref) } res->backup_dirty = false; vmw_resource_mob_detach(res); + if (res->dirty) + res->func->dirty_free(res); + if (res->coherent) + vmw_bo_dirty_release(res->backup); ttm_bo_unreserve(bo); vmw_bo_unreference(&res->backup); } @@ -200,15 +218,17 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, res->res_free = res_free; res->dev_priv = dev_priv; res->func = func; + RB_CLEAR_NODE(&res->mob_node); INIT_LIST_HEAD(&res->lru_head); - INIT_LIST_HEAD(&res->mob_head); INIT_LIST_HEAD(&res->binding_head); res->id = -1; res->backup = NULL; res->backup_offset = 0; res->backup_dirty = false; res->res_dirty = false; + res->coherent = false; res->used_prio = 3; + res->dirty = NULL; if (delay_id) return 0; else @@ -373,7 +393,8 @@ out_no_bo: * should be retried once resources have been freed up. */ static int vmw_resource_do_validate(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf) + struct ttm_validate_buffer *val_buf, + bool dirtying) { int ret = 0; const struct vmw_res_func *func = res->func; @@ -395,6 +416,39 @@ static int vmw_resource_do_validate(struct vmw_resource *res, vmw_resource_mob_attach(res); } + /* + * Handle the case where the backup mob is marked coherent but + * the resource isn't. + */ + if (func->dirty_alloc && vmw_resource_mob_attached(res) && + !res->coherent) { + if (res->backup->dirty && !res->dirty) { + ret = func->dirty_alloc(res); + if (ret) + return ret; + } else if (!res->backup->dirty && res->dirty) { + func->dirty_free(res); + } + } + + /* + * Transfer the dirty regions to the resource and update + * the resource. + */ + if (res->dirty) { + if (dirtying && !res->res_dirty) { + pgoff_t start = res->backup_offset >> PAGE_SHIFT; + pgoff_t end = __KERNEL_DIV_ROUND_UP + (res->backup_offset + res->backup_size, + PAGE_SIZE); + + vmw_bo_dirty_unmap(res->backup, start, end); + } + + vmw_bo_dirty_transfer_to_res(res); + return func->dirty_sync(res); + } + return 0; out_bind_failed: @@ -433,16 +487,28 @@ void vmw_resource_unreserve(struct vmw_resource *res, if (switch_backup && new_backup != res->backup) { if (res->backup) { vmw_resource_mob_detach(res); + if (res->coherent) + vmw_bo_dirty_release(res->backup); vmw_bo_unreference(&res->backup); } if (new_backup) { res->backup = vmw_bo_reference(new_backup); + + /* + * The validation code should already have added a + * dirty tracker here. + */ + WARN_ON(res->coherent && !new_backup->dirty); + vmw_resource_mob_attach(res); } else { res->backup = NULL; } + } else if (switch_backup && res->coherent) { + vmw_bo_dirty_release(res->backup); } + if (switch_backup) res->backup_offset = new_backup_offset; @@ -622,6 +688,7 @@ out_no_unbind: * to the device. * @res: The resource to make visible to the device. * @intr: Perform waits interruptible if possible. + * @dirtying: Pending GPU operation will dirty the resource * * On successful return, any backup DMA buffer pointed to by @res->backup will * be reserved and validated.
@@ -631,7 +698,8 @@ out_no_unbind: * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code * on failure. */ -int vmw_resource_validate(struct vmw_resource *res, bool intr) +int vmw_resource_validate(struct vmw_resource *res, bool intr, + bool dirtying) { int ret; struct vmw_resource *evict_res; @@ -648,7 +716,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr) if (res->backup) val_buf.bo = &res->backup->base; do { - ret = vmw_resource_do_validate(res, &val_buf); + ret = vmw_resource_do_validate(res, &val_buf, dirtying); if (likely(ret != -EBUSY)) break; @@ -711,19 +779,20 @@ out_no_validate: */ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) { - - struct vmw_resource *res, *next; struct ttm_validate_buffer val_buf = { .bo = &vbo->base, .num_shared = 0 }; dma_resv_assert_held(vbo->base.base.resv); - list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { - if (!res->func->unbind) - continue; + while (!RB_EMPTY_ROOT(&vbo->res_tree)) { + struct rb_node *node = vbo->res_tree.rb_node; + struct vmw_resource *res = + container_of(node, struct vmw_resource, mob_node); + + if (!WARN_ON_ONCE(!res->func->unbind)) + (void) res->func->unbind(res, res->res_dirty, &val_buf); - (void) res->func->unbind(res, res->res_dirty, &val_buf); res->backup_dirty = true; res->res_dirty = false; vmw_resource_mob_detach(res); @@ -947,7 +1016,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) /* Do we really need to pin the MOB as well? */ vmw_bo_pin_reserved(vbo, true); } - ret = vmw_resource_validate(res, interruptible); + ret = vmw_resource_validate(res, interruptible, true); if (vbo) ttm_bo_unreserve(&vbo->base); if (ret) @@ -1007,3 +1076,101 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res) { return res->func->res_type; } + +/** + * vmw_resource_dirty_update - Update a resource's dirty tracker with a + * sequential range of touched backing store memory. + * @res: The resource. + * @start: The first page touched. + * @end: The last page touched + 1. + */ +void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, + pgoff_t end) +{ + if (res->dirty) + res->func->dirty_range_add(res, start << PAGE_SHIFT, + end << PAGE_SHIFT); +} + +/** + * vmw_resources_clean - Clean resources intersecting a mob range + * @vbo: The mob buffer object + * @start: The mob page offset starting the range + * @end: The mob page offset ending the range + * @num_prefault: Returns how many pages including the first have been + * cleaned and are ok to prefault + */ +int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, + pgoff_t end, pgoff_t *num_prefault) +{ + struct rb_node *cur = vbo->res_tree.rb_node; + struct vmw_resource *found = NULL; + unsigned long res_start = start << PAGE_SHIFT; + unsigned long res_end = end << PAGE_SHIFT; + unsigned long last_cleaned = 0; + + /* + * Find the resource with lowest backup_offset that intersects the + * range. + */ + while (cur) { + struct vmw_resource *cur_res = + container_of(cur, struct vmw_resource, mob_node); + + if (cur_res->backup_offset >= res_end) { + cur = cur->rb_left; + } else if (cur_res->backup_offset + cur_res->backup_size <= + res_start) { + cur = cur->rb_right; + } else { + found = cur_res; + cur = cur->rb_left; + /* Continue to look for resources with lower offsets */ + } + } + + /* + * In order of increasing backup_offset, clean dirty resources + * intersecting the range.
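+ * Since the tree is keyed on backup_offset, rb_next() visits resources + * in ascending offset order and the walk below can stop at the first + * resource starting at or beyond the end of the range.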
+ */ + while (found) { + if (found->res_dirty) { + int ret; + + if (!found->func->clean) + return -EINVAL; + + ret = found->func->clean(found); + if (ret) + return ret; + + found->res_dirty = false; + } + last_cleaned = found->backup_offset + found->backup_size; + cur = rb_next(&found->mob_node); + if (!cur) + break; + + found = container_of(cur, struct vmw_resource, mob_node); + if (found->backup_offset >= res_end) + break; + } + + /* + * Set the number of pages allowed for prefaulting and fence the buffer object + */ + *num_prefault = 1; + if (last_cleaned > res_start) { + struct ttm_buffer_object *bo = &vbo->base; + + *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start, + PAGE_SIZE); + vmw_bo_fence_single(bo, NULL); + if (bo->moving) + dma_fence_put(bo->moving); + bo->moving = dma_fence_get + (dma_resv_get_excl(bo->base.resv)); + } + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h index 984e588c62ca..3b7438b2d289 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h @@ -71,6 +71,13 @@ struct vmw_user_resource_conv { * @commit_notify: If the resource is a command buffer managed resource, * callback to notify that a define or remove command * has been committed to the device. + * @dirty_alloc: Allocate a dirty tracker. NULL if dirty-tracking is not + * supported. + * @dirty_free: Free the dirty tracker. + * @dirty_sync: Upload the dirty mob contents to the resource. + * @dirty_range_add: Add a sequential dirty range to the resource + * dirty tracker. + * @clean: Clean the resource. */ struct vmw_res_func { enum vmw_res_type res_type; @@ -90,6 +97,12 @@ struct vmw_res_func { struct ttm_validate_buffer *val_buf); void (*commit_notify)(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); + int (*dirty_alloc)(struct vmw_resource *res); + void (*dirty_free)(struct vmw_resource *res); + int (*dirty_sync)(struct vmw_resource *res); + void (*dirty_range_add)(struct vmw_resource *res, size_t start, + size_t end); + int (*clean)(struct vmw_resource *res); }; /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 29d8794f0421..32b9131b2bae 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -68,6 +68,20 @@ struct vmw_surface_offset { uint32_t bo_offset; }; +/** + * vmw_surface_dirty - Surface dirty-tracker + * @cache: Cached layout information of the surface. + * @size: Accounting size for the struct vmw_surface_dirty. + * @num_subres: Number of subresources. + * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
+ */ +struct vmw_surface_dirty { + struct svga3dsurface_cache cache; + size_t size; + u32 num_subres; + SVGA3dBox boxes[0]; +}; + static void vmw_user_surface_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_surface_base_to_res(struct ttm_base_object *base); @@ -96,6 +110,13 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, struct drm_vmw_gb_surface_ref_ext_rep *rep, struct drm_file *file_priv); +static void vmw_surface_dirty_free(struct vmw_resource *res); +static int vmw_surface_dirty_alloc(struct vmw_resource *res); +static int vmw_surface_dirty_sync(struct vmw_resource *res); +static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, + size_t end); +static int vmw_surface_clean(struct vmw_resource *res); + static const struct vmw_user_resource_conv user_surface_conv = { .object_type = VMW_RES_SURFACE, .base_obj_to_res = vmw_user_surface_base_to_res, @@ -133,7 +154,12 @@ static const struct vmw_res_func vmw_gb_surface_func = { .create = vmw_gb_surface_create, .destroy = vmw_gb_surface_destroy, .bind = vmw_gb_surface_bind, - .unbind = vmw_gb_surface_unbind + .unbind = vmw_gb_surface_unbind, + .dirty_alloc = vmw_surface_dirty_alloc, + .dirty_free = vmw_surface_dirty_free, + .dirty_sync = vmw_surface_dirty_sync, + .dirty_range_add = vmw_surface_dirty_range_add, + .clean = vmw_surface_clean, }; /** @@ -336,7 +362,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; void *cmd; if (res->func->destroy == vmw_gb_surface_destroy) { @@ -360,7 +385,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) */ mutex_lock(&dev_priv->cmdbuf_mutex); - srf = vmw_res_to_srf(res); dev_priv->used_memory_size -= res->backup_size; mutex_unlock(&dev_priv->cmdbuf_mutex); } @@ -641,6 +665,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_private *dev_priv = srf->res.dev_priv; uint32_t size = user_srf->size; + WARN_ON_ONCE(res->dirty); if (user_srf->master) drm_master_put(&user_srf->master); kfree(srf->offsets); @@ -1168,10 +1193,16 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; cmd2->header.size = sizeof(cmd2->body); cmd2->body.sid = res->id; - res->backup_dirty = false; } vmw_fifo_commit(dev_priv, submit_size); + if (res->backup->dirty && res->backup_dirty) { + /* We've just made a full upload. Clear dirty regions.
*/ + vmw_bo_dirty_clear_res(res); + } + + res->backup_dirty = false; + return 0; } @@ -1636,7 +1667,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } } } else if (req->base.drm_surface_flags & - drm_vmw_surface_flag_create_buffer) + (drm_vmw_surface_flag_create_buffer | + drm_vmw_surface_flag_coherent)) ret = vmw_user_bo_alloc(dev_priv, tfile, res->backup_size, req->base.drm_surface_flags & @@ -1650,6 +1682,26 @@ vmw_gb_surface_define_internal(struct drm_device *dev, goto out_unlock; } + if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { + struct vmw_buffer_object *backup = res->backup; + + ttm_bo_reserve(&backup->base, false, false, NULL); + if (!res->func->dirty_alloc) + ret = -EINVAL; + if (!ret) + ret = vmw_bo_dirty_add(backup); + if (!ret) { + res->coherent = true; + ret = res->func->dirty_alloc(res); + } + ttm_bo_unreserve(&backup->base); + if (ret) { + vmw_resource_unreference(&res); + goto out_unlock; + } + + } + tmp = vmw_resource_reference(res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->base.drm_surface_flags & @@ -1758,3 +1810,338 @@ out_bad_resource: return ret; } + +/** + * vmw_subres_dirty_add - Add a dirty region to a subresource + * @dirty: The surfaces's dirty tracker. + * @loc_start: The location corresponding to the start of the region. + * @loc_end: The location corresponding to the end of the region. + * + * As we are assuming that @loc_start and @loc_end represent a sequential + * range of backing store memory, if the region spans multiple lines then + * regardless of the x coordinate, the full lines are dirtied. + * Correspondingly if the region spans multiple z slices, then full rather + * than partial z slices are dirtied. + */ +static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, + const struct svga3dsurface_loc *loc_start, + const struct svga3dsurface_loc *loc_end) +{ + const struct svga3dsurface_cache *cache = &dirty->cache; + SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource]; + u32 mip = loc_start->sub_resource % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + u32 box_c2 = box->z + box->d; + + if (WARN_ON(loc_start->sub_resource >= dirty->num_subres)) + return; + + if (box->d == 0 || box->z > loc_start->z) + box->z = loc_start->z; + if (box_c2 < loc_end->z) + box->d = loc_end->z - box->z; + + if (loc_start->z + 1 == loc_end->z) { + box_c2 = box->y + box->h; + if (box->h == 0 || box->y > loc_start->y) + box->y = loc_start->y; + if (box_c2 < loc_end->y) + box->h = loc_end->y - box->y; + + if (loc_start->y + 1 == loc_end->y) { + box_c2 = box->x + box->w; + if (box->w == 0 || box->x > loc_start->x) + box->x = loc_start->x; + if (box_c2 < loc_end->x) + box->w = loc_end->x - box->x; + } else { + box->x = 0; + box->w = size->width; + } + } else { + box->y = 0; + box->h = size->height; + box->x = 0; + box->w = size->width; + } +} + +/** + * vmw_subres_dirty_full - Mark a full subresource as dirty + * @dirty: The surface's dirty tracker. 
+ * @subres: The subresource + */ +static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres) +{ + const struct svga3dsurface_cache *cache = &dirty->cache; + u32 mip = subres % cache->num_mip_levels; + const struct drm_vmw_size *size = &cache->mip[mip].size; + SVGA3dBox *box = &dirty->boxes[subres]; + + box->x = 0; + box->y = 0; + box->z = 0; + box->w = size->width; + box->h = size->height; + box->d = size->depth; +} + +/* + * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture + * surfaces. + */ +static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res, + size_t start, size_t end) +{ + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + size_t backup_end = res->backup_offset + res->backup_size; + struct svga3dsurface_loc loc1, loc2; + const struct svga3dsurface_cache *cache; + + start = max_t(size_t, start, res->backup_offset) - res->backup_offset; + end = min(end, backup_end) - res->backup_offset; + cache = &dirty->cache; + svga3dsurface_get_loc(cache, &loc1, start); + svga3dsurface_get_loc(cache, &loc2, end - 1); + svga3dsurface_inc_loc(cache, &loc2); + + if (loc1.sub_resource + 1 == loc2.sub_resource) { + /* Dirty range covers a single sub-resource */ + vmw_subres_dirty_add(dirty, &loc1, &loc2); + } else { + /* Dirty range covers multiple sub-resources */ + struct svga3dsurface_loc loc_min, loc_max; + u32 sub_res; + + svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max); + vmw_subres_dirty_add(dirty, &loc1, &loc_max); + svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min); + vmw_subres_dirty_add(dirty, &loc_min, &loc2); + for (sub_res = loc1.sub_resource + 1; + sub_res < loc2.sub_resource - 1; ++sub_res) + vmw_subres_dirty_full(dirty, sub_res); + } +} + +/* + * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer + * surfaces. + */ +static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res, + size_t start, size_t end) +{ + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + const struct svga3dsurface_cache *cache = &dirty->cache; + size_t backup_end = res->backup_offset + cache->mip_chain_bytes; + SVGA3dBox *box = &dirty->boxes[0]; + u32 box_c2; + + box->h = box->d = 1; + start = max_t(size_t, start, res->backup_offset) - res->backup_offset; + end = min(end, backup_end) - res->backup_offset; + box_c2 = box->x + box->w; + if (box->w == 0 || box->x > start) + box->x = start; + if (box_c2 < end) + box->w = end - box->x; +} + +/* + * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces + */ +static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, + size_t end) +{ + struct vmw_surface *srf = vmw_res_to_srf(res); + + if (WARN_ON(end <= res->backup_offset || + start >= res->backup_offset + res->backup_size)) + return; + + if (srf->format == SVGA3D_BUFFER) + vmw_surface_buf_dirty_range_add(res, start, end); + else + vmw_surface_tex_dirty_range_add(res, start, end); +} + +/* + * vmw_surface_dirty_sync - The surface's dirty_sync callback.
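Each box-growing callback above performs the same one-dimensional operation per axis: widen a (start, extent) interval until it also covers a new range. The subtlety is that the current end coordinate must be captured before the start is moved, which is what the box_c2 temporaries are for. A self-contained sketch of that interval union, with illustrative names rather than the driver's API:

#include <linux/kernel.h>
#include <linux/types.h>

/* Grow the interval [*pos, *pos + *extent) to also cover [lo, hi). */
static void demo_interval_union(u32 *pos, u32 *extent, u32 lo, u32 hi)
{
	u32 end;

	if (*extent == 0) {		/* empty interval: adopt the new range */
		*pos = lo;
		*extent = hi - lo;
		return;
	}

	end = max(*pos + *extent, hi);	/* capture the end before moving pos */
	*pos = min(*pos, lo);
	*extent = end - *pos;
}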
+ */ +static int vmw_surface_dirty_sync(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + bool has_dx = 0; + u32 i, num_dirty; + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + size_t alloc_size; + const struct svga3dsurface_cache *cache = &dirty->cache; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDXUpdateSubResource body; + } *cmd1; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdUpdateGBImage body; + } *cmd2; + void *cmd; + + num_dirty = 0; + for (i = 0; i < dirty->num_subres; ++i) { + const SVGA3dBox *box = &dirty->boxes[i]; + + if (box->d) + num_dirty++; + } + + if (!num_dirty) + goto out; + + alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2)); + cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); + if (!cmd) + return -ENOMEM; + + cmd1 = cmd; + cmd2 = cmd; + + for (i = 0; i < dirty->num_subres; ++i) { + const SVGA3dBox *box = &dirty->boxes[i]; + + if (!box->d) + continue; + + /* + * DX_UPDATE_SUBRESOURCE is aware of array surfaces. + * UPDATE_GB_IMAGE is not. + */ + if (has_dx) { + cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE; + cmd1->header.size = sizeof(cmd1->body); + cmd1->body.sid = res->id; + cmd1->body.subResource = i; + cmd1->body.box = *box; + cmd1++; + } else { + cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; + cmd2->header.size = sizeof(cmd2->body); + cmd2->body.image.sid = res->id; + cmd2->body.image.face = i / cache->num_mip_levels; + cmd2->body.image.mipmap = i - + (cache->num_mip_levels * cmd2->body.image.face); + cmd2->body.box = *box; + cmd2++; + } + + } + vmw_fifo_commit(dev_priv, alloc_size); + out: + memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) * + dirty->num_subres); + + return 0; +} + +/* + * vmw_surface_dirty_alloc - The surface's dirty_alloc callback. 
+ */ +static int vmw_surface_dirty_alloc(struct vmw_resource *res) +{ + struct vmw_surface *srf = vmw_res_to_srf(res); + struct vmw_surface_dirty *dirty; + u32 num_layers = 1; + u32 num_mip; + u32 num_subres; + u32 num_samples; + size_t dirty_size, acc_size; + static struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; + int ret; + + if (srf->array_size) + num_layers = srf->array_size; + else if (srf->flags & SVGA3D_SURFACE_CUBEMAP) + num_layers *= SVGA3D_MAX_SURFACE_FACES; + + num_mip = srf->mip_levels[0]; + if (!num_mip) + num_mip = 1; + + num_subres = num_layers * num_mip; + dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]); + acc_size = ttm_round_pot(dirty_size); + ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), + acc_size, &ctx); + if (ret) { + VMW_DEBUG_USER("Out of graphics memory for surface " + "dirty tracker.\n"); + return ret; + } + + dirty = kvzalloc(dirty_size, GFP_KERNEL); + if (!dirty) { + ret = -ENOMEM; + goto out_no_dirty; + } + + num_samples = max_t(u32, 1, srf->multisample_count); + ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip, + num_layers, num_samples, &dirty->cache); + if (ret) + goto out_no_cache; + + dirty->num_subres = num_subres; + dirty->size = acc_size; + res->dirty = (struct vmw_resource_dirty *) dirty; + + return 0; + +out_no_cache: + kvfree(dirty); +out_no_dirty: + ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); + return ret; +} + +/* + * vmw_surface_dirty_free - The surface's dirty_free callback + */ +static void vmw_surface_dirty_free(struct vmw_resource *res) +{ + struct vmw_surface_dirty *dirty = + (struct vmw_surface_dirty *) res->dirty; + size_t acc_size = dirty->size; + + kvfree(dirty); + ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); + res->dirty = NULL; +} + +/* + * vmw_surface_clean - The surface's clean callback + */ +static int vmw_surface_clean(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + size_t alloc_size; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdReadbackGBSurface body; + } *cmd; + + alloc_size = sizeof(*cmd); + cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); + if (!cmd) + return -ENOMEM; + + cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; + cmd->header.size = sizeof(cmd->body); + cmd->body.sid = res->id; + vmw_fifo_commit(dev_priv, alloc_size); + + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 5a7b8bb420de..ce288756531b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -29,10 +29,23 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) { + static const struct vm_operations_struct vmw_vm_ops = { + .pfn_mkwrite = vmw_bo_vm_mkwrite, + .page_mkwrite = vmw_bo_vm_mkwrite, + .fault = vmw_bo_vm_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close + }; struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); + int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev); - return ttm_bo_mmap(filp, vma, &dev_priv->bdev); + if (ret) + return ret; + + vma->vm_ops = &vmw_vm_ops; + + return 0; } /* struct vmw_validation_mem callback */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index 7bff3628fc54..e69bc373ae2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -33,6 +33,8 @@ * struct vmw_validation_bo_node - Buffer 
object validation metadata. * @base: Metadata used for TTM reservation- and validation. * @hash: A hash entry used for the duplicate detection hash table. + * @coherent_count: If switching backup buffers, number of new coherent + * resources that will have this buffer as a backup buffer. * @as_mob: Validate as mob. * @cpu_blit: Validate for cpu blit access. * @@ -42,6 +44,7 @@ struct vmw_validation_bo_node { struct ttm_validate_buffer base; struct drm_hash_item hash; + unsigned int coherent_count; u32 as_mob : 1; u32 cpu_blit : 1; }; @@ -459,6 +462,19 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx, if (ret) goto out_unreserve; } + + if (val->switching_backup && val->new_backup && + res->coherent) { + struct vmw_validation_bo_node *bo_node = + vmw_validation_find_bo_dup(ctx, + val->new_backup); + + if (WARN_ON(!bo_node)) { + ret = -EINVAL; + goto out_unreserve; + } + bo_node->coherent_count++; + } } return 0; @@ -565,6 +581,9 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) int ret; list_for_each_entry(entry, &ctx->bo_list, base.head) { + struct vmw_buffer_object *vbo = + container_of(entry->base.bo, typeof(*vbo), base); + if (entry->cpu_blit) { struct ttm_operation_ctx ctx = { .interruptible = intr, @@ -579,6 +598,27 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) } if (ret) return ret; + + /* + * Rather than have the resource code allocate the bo dirty + * tracker in resource_unreserve(), where we can't fail, do it + * here when validating the buffer object. + */ + if (entry->coherent_count) { + unsigned int coherent_count = entry->coherent_count; + + while (coherent_count) { + ret = vmw_bo_dirty_add(vbo); + if (ret) + return ret; + + coherent_count--; + } + entry->coherent_count -= coherent_count; + } + + if (vbo->dirty) + vmw_bo_dirty_scan(vbo); } return 0; } @@ -604,7 +644,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) struct vmw_resource *res = val->res; struct vmw_buffer_object *backup = res->backup; - ret = vmw_resource_validate(res, intr); + ret = vmw_resource_validate(res, intr, val->dirty_set && + val->dirty); if (ret) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to validate resource.\n"); @@ -831,3 +872,34 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx, ctx->mem_size_left += size; return 0; } + +/** + * vmw_validation_bo_backoff - Unreserve buffer objects registered with a + * validation context + * @ctx: The validation context + * + * This function unreserves the buffer objects previously reserved using + * vmw_validation_bo_reserve. It's typically used as part of an error path. + */ +void vmw_validation_bo_backoff(struct vmw_validation_context *ctx) +{ + struct vmw_validation_bo_node *entry; + + /* + * Switching coherent resource backup buffers failed. + * Release corresponding buffer object dirty trackers.
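The coherent_count field behaves like a per-buffer count of dirty-tracking references: reservation counts how many coherent resources will switch to the buffer, validation converts each count into a vmw_bo_dirty_add() call, and the backoff path below releases the same number on failure. A reduced model of that pairing, with hypothetical demo_* names standing in for the vmwgfx helpers:

struct demo_bo;				/* stands in for struct vmw_buffer_object */
int demo_dirty_add(struct demo_bo *bo);	/* vmw_bo_dirty_add() */
void demo_dirty_release(struct demo_bo *bo);	/* vmw_bo_dirty_release() */

/* Take @count tracker references, releasing what was taken on error. */
static int demo_take_trackers(struct demo_bo *bo, unsigned int count)
{
	unsigned int taken;
	int ret;

	for (taken = 0; taken < count; taken++) {
		ret = demo_dirty_add(bo);
		if (ret)
			goto release;
	}

	return 0;

release:
	while (taken--)
		demo_dirty_release(bo);
	return ret;
}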
+ */ + list_for_each_entry(entry, &ctx->bo_list, base.head) { + if (entry->coherent_count) { + unsigned int coherent_count = entry->coherent_count; + struct vmw_buffer_object *vbo = + container_of(entry->base.bo, typeof(*vbo), + base); + + while (coherent_count--) + vmw_bo_dirty_release(vbo); + } + } + + ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index 71ce4b318850..739906d1b3eb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h @@ -174,20 +174,6 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx, } /** - * vmw_validation_bo_backoff - Unreserve buffer objects registered with a - * validation context - * @ctx: The validation context - * - * This function unreserves the buffer objects previously reserved using - * vmw_validation_bo_reserve. It's typically used as part of an error path - */ -static inline void -vmw_validation_bo_backoff(struct vmw_validation_context *ctx) -{ - ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list); -} - -/** * vmw_validation_bo_fence - Unreserve and fence buffer objects registered * with a validation context * @ctx: The validation context @@ -269,4 +255,6 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx, unsigned int size); void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, void *val_private, u32 dirty); +void vmw_validation_bo_backoff(struct vmw_validation_context *ctx); + #endif diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig index cf987a317a55..6dab94adf25e 100644 --- a/drivers/gpu/host1x/Kconfig +++ b/drivers/gpu/host1x/Kconfig @@ -2,7 +2,7 @@ config TEGRA_HOST1X tristate "NVIDIA Tegra host1x driver" depends on ARCH_TEGRA || (ARM && COMPILE_TEST) - select IOMMU_IOVA if IOMMU_SUPPORT + select IOMMU_IOVA help Driver for the NVIDIA Tegra host1x hardware. diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 742aa9ff21b8..2c8559ff3481 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x, of_dma_configure(&device->dev, host1x->dev->of_node, true); device->dev.dma_parms = &device->dma_parms; - dma_set_max_seg_size(&device->dev, SZ_4M); + dma_set_max_seg_size(&device->dev, UINT_MAX); err = host1x_device_parse_dt(device, driver); if (err < 0) { diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c index 48c84c48299c..e8d3fda91d8a 100644 --- a/drivers/gpu/host1x/cdma.c +++ b/drivers/gpu/host1x/cdma.c @@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma, * * Must be called with the cdma lock held. 
*/ -int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x, - struct host1x_cdma *cdma, - unsigned int needed) +static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x, + struct host1x_cdma *cdma, + unsigned int needed) { while (true) { struct push_buffer *pb = &cdma->push_buffer; diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index 1436295aa450..4cd212bb570d 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host) /** * host1x_channel_request() - Allocate a channel - * @device: Host1x unit this channel will be used to send commands to + * @client: Host1x client this channel will be used to send commands to * - * Allocates a new host1x channel for @device. May return NULL if CDMA + * Allocates a new host1x channel for @client. May return NULL if CDMA * initialization fails. */ -struct host1x_channel *host1x_channel_request(struct device *dev) +struct host1x_channel *host1x_channel_request(struct host1x_client *client) { - struct host1x *host = dev_get_drvdata(dev->parent); + struct host1x *host = dev_get_drvdata(client->dev->parent); struct host1x_channel_list *chlist = &host->channel_list; struct host1x_channel *channel; int err; @@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev) kref_init(&channel->refcount); mutex_init(&channel->submitlock); - channel->dev = dev; + channel->client = client; + channel->dev = client->dev; err = host1x_hw_channel_init(host, channel, channel->id); if (err < 0) @@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev) fail: clear_bit(channel->id, chlist->allocated_channels); - dev_err(dev, "failed to initialize channel\n"); + dev_err(client->dev, "failed to initialize channel\n"); return NULL; } diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h index 4fd694834f74..39044ff6c3aa 100644 --- a/drivers/gpu/host1x/channel.h +++ b/drivers/gpu/host1x/channel.h @@ -26,6 +26,7 @@ struct host1x_channel { unsigned int id; struct mutex submitlock; void __iomem *regs; + struct host1x_client *client; struct device *dev; struct host1x_cdma cdma; }; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 5a3f797240d4..a738ea55e407 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -18,10 +18,6 @@ #include <trace/events/host1x.h> #undef CREATE_TRACE_POINTS -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) -#include <asm/dma-iommu.h> -#endif - #include "bus.h" #include "channel.h" #include "debug.h" @@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = { .init = host1x01_init, .sync_offset = 0x3000, .dma_mask = DMA_BIT_MASK(32), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_info host1x02_info = { @@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = { .init = host1x02_init, .sync_offset = 0x3000, .dma_mask = DMA_BIT_MASK(32), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_info host1x04_info = { @@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = { .init = host1x04_init, .sync_offset = 0x2100, .dma_mask = DMA_BIT_MASK(34), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_info host1x05_info = { @@ -107,6 +115,10 @@ static const 
struct host1x_info host1x05_info = { .init = host1x05_init, .sync_offset = 0x2100, .dma_mask = DMA_BIT_MASK(34), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_sid_entry tegra186_sid_table[] = { @@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = { .init = host1x06_init, .sync_offset = 0x0, .dma_mask = DMA_BIT_MASK(40), + .has_wide_gather = true, .has_hypervisor = true, .num_sid_entries = ARRAY_SIZE(tegra186_sid_table), .sid_table = tegra186_sid_table, @@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = { .init = host1x07_init, .sync_offset = 0x0, .dma_mask = DMA_BIT_MASK(40), + .has_wide_gather = true, .has_hypervisor = true, .num_sid_entries = ARRAY_SIZE(tegra194_sid_table), .sid_table = tegra194_sid_table, @@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host) } } +static struct iommu_domain *host1x_iommu_attach(struct host1x *host) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); + int err; + + /* + * If the host1x firewall is enabled, there's no need to enable IOMMU + * support. Similarly, if host1x is already attached to an IOMMU (via + * the DMA API), don't try to attach again. + */ + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain) + return domain; + + host->group = iommu_group_get(host->dev); + if (host->group) { + struct iommu_domain_geometry *geometry; + dma_addr_t start, end; + unsigned long order; + + err = iova_cache_get(); + if (err < 0) + goto put_group; + + host->domain = iommu_domain_alloc(&platform_bus_type); + if (!host->domain) { + err = -ENOMEM; + goto put_cache; + } + + err = iommu_attach_group(host->domain, host->group); + if (err) { + if (err == -ENODEV) + err = 0; + + goto free_domain; + } + + geometry = &host->domain->geometry; + start = geometry->aperture_start & host->info->dma_mask; + end = geometry->aperture_end & host->info->dma_mask; + + order = __ffs(host->domain->pgsize_bitmap); + init_iova_domain(&host->iova, 1UL << order, start >> order); + host->iova_end = end; + + domain = host->domain; + } + + return domain; + +free_domain: + iommu_domain_free(host->domain); + host->domain = NULL; +put_cache: + iova_cache_put(); +put_group: + iommu_group_put(host->group); + host->group = NULL; + + return ERR_PTR(err); +} + +static int host1x_iommu_init(struct host1x *host) +{ + u64 mask = host->info->dma_mask; + struct iommu_domain *domain; + int err; + + domain = host1x_iommu_attach(host); + if (IS_ERR(domain)) { + err = PTR_ERR(domain); + dev_err(host->dev, "failed to attach to IOMMU: %d\n", err); + return err; + } + + /* + * If we're not behind an IOMMU make sure we don't get push buffers + * that are allocated outside of the range addressable by the GATHER + * opcode. + * + * Newer generations of Tegra (Tegra186 and later) support a wide + * variant of the GATHER opcode that allows addressing more bits. 
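When host1x manages its own IOMMU domain, it sizes the IOVA allocator from the smallest page size the domain supports (the __ffs(pgsize_bitmap) order above) and then hands out granule-aligned ranges. A hedged sketch of the allocator round trip, using the same <linux/iova.h> API the driver already relies on:

#include <linux/iova.h>

/* Allocate @npages granules below @limit_pfn and return the bus address. */
static dma_addr_t demo_iova_get(struct iova_domain *iovad,
				unsigned long npages, unsigned long limit_pfn)
{
	struct iova *alloc = alloc_iova(iovad, npages, limit_pfn, true);

	if (!alloc)
		return 0;

	return iova_dma_addr(iovad, alloc);
}

/* Release by pfn, mirroring the free_iova() call in host1x_job_unpin(). */
static void demo_iova_put(struct iova_domain *iovad, dma_addr_t addr)
{
	free_iova(iovad, iova_pfn(iovad, addr));
}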
+ */ + if (!domain && !host->info->has_wide_gather) + mask = DMA_BIT_MASK(32); + + err = dma_coerce_mask_and_coherent(host->dev, mask); + if (err < 0) { + dev_err(host->dev, "failed to set DMA mask: %d\n", err); + return err; + } + + return 0; +} + +static void host1x_iommu_exit(struct host1x *host) +{ + if (host->domain) { + put_iova_domain(&host->iova); + iommu_detach_group(host->domain, host->group); + + iommu_domain_free(host->domain); + host->domain = NULL; + + iova_cache_put(); + + iommu_group_put(host->group); + host->group = NULL; + } +} + static int host1x_probe(struct platform_device *pdev) { struct host1x *host; @@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev) return PTR_ERR(host->hv_regs); } - dma_set_mask_and_coherent(host->dev, host->info->dma_mask); + host->dev->dma_parms = &host->dma_parms; + dma_set_max_seg_size(host->dev, UINT_MAX); if (host->info->init) { err = host->info->init(host); @@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get reset: %d\n", err); return err; } -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) - if (host->dev->archdata.mapping) { - struct dma_iommu_mapping *mapping = - to_dma_iommu_mapping(host->dev); - arm_iommu_detach_device(host->dev); - arm_iommu_release_mapping(mapping); - } -#endif - if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) - goto skip_iommu; - - host->group = iommu_group_get(&pdev->dev); - if (host->group) { - struct iommu_domain_geometry *geometry; - u64 mask = dma_get_mask(host->dev); - dma_addr_t start, end; - unsigned long order; - - err = iova_cache_get(); - if (err < 0) - goto put_group; - - host->domain = iommu_domain_alloc(&platform_bus_type); - if (!host->domain) { - err = -ENOMEM; - goto put_cache; - } - err = iommu_attach_group(host->domain, host->group); - if (err) { - if (err == -ENODEV) { - iommu_domain_free(host->domain); - host->domain = NULL; - iova_cache_put(); - iommu_group_put(host->group); - host->group = NULL; - goto skip_iommu; - } - - goto fail_free_domain; - } - - geometry = &host->domain->geometry; - start = geometry->aperture_start & mask; - end = geometry->aperture_end & mask; - - order = __ffs(host->domain->pgsize_bitmap); - init_iova_domain(&host->iova, 1UL << order, start >> order); - host->iova_end = end; + err = host1x_iommu_init(host); + if (err < 0) { + dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err); + return err; } -skip_iommu: err = host1x_channel_list_init(&host->channel_list, host->info->nb_channels); if (err) { dev_err(&pdev->dev, "failed to initialize channel list\n"); - goto fail_detach_device; + goto iommu_exit; } err = clk_prepare_enable(host->clk); if (err < 0) { dev_err(&pdev->dev, "failed to enable clock\n"); - goto fail_free_channels; + goto free_channels; } err = reset_control_deassert(host->rst); if (err < 0) { dev_err(&pdev->dev, "failed to deassert reset: %d\n", err); - goto fail_unprepare_disable; + goto unprepare_disable; } err = host1x_syncpt_init(host); if (err) { dev_err(&pdev->dev, "failed to initialize syncpts\n"); - goto fail_reset_assert; + goto reset_assert; } err = host1x_intr_init(host, syncpt_irq); if (err) { dev_err(&pdev->dev, "failed to initialize interrupts\n"); - goto fail_deinit_syncpt; + goto deinit_syncpt; } host1x_debug_init(host); @@ -351,33 +432,22 @@ skip_iommu: err = host1x_register(host); if (err < 0) - goto fail_deinit_intr; + goto deinit_intr; return 0; -fail_deinit_intr: +deinit_intr: host1x_intr_deinit(host); -fail_deinit_syncpt: +deinit_syncpt: 
host1x_syncpt_deinit(host); -fail_reset_assert: +reset_assert: reset_control_assert(host->rst); -fail_unprepare_disable: +unprepare_disable: clk_disable_unprepare(host->clk); -fail_free_channels: +free_channels: host1x_channel_list_free(&host->channel_list); -fail_detach_device: - if (host->group && host->domain) { - put_iova_domain(&host->iova); - iommu_detach_group(host->domain, host->group); - } -fail_free_domain: - if (host->domain) - iommu_domain_free(host->domain); -put_cache: - if (host->group) - iova_cache_put(); -put_group: - iommu_group_put(host->group); +iommu_exit: + host1x_iommu_exit(host); return err; } @@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev) struct host1x *host = platform_get_drvdata(pdev); host1x_unregister(host); + host1x_debug_deinit(host); host1x_intr_deinit(host); host1x_syncpt_deinit(host); reset_control_assert(host->rst); clk_disable_unprepare(host->clk); - - if (host->domain) { - put_iova_domain(&host->iova); - iommu_detach_group(host->domain, host->group); - iommu_domain_free(host->domain); - iova_cache_put(); - iommu_group_put(host->group); - } + host1x_iommu_exit(host); return 0; } diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index ff56f5e23a02..f781a9b0f39d 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -97,6 +97,7 @@ struct host1x_info { int (*init)(struct host1x *host1x); /* initialize per SoC ops */ unsigned int sync_offset; /* offset of syncpoint registers */ u64 dma_mask; /* mask of addressable memory */ + bool has_wide_gather; /* supports GATHER_W opcode */ bool has_hypervisor; /* has hypervisor registers */ unsigned int num_sid_entries; const struct host1x_sid_entry *sid_table; @@ -140,6 +141,8 @@ struct host1x { struct list_head devices; struct list_head list; + + struct device_dma_parameters dma_parms; }; void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v); diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c index 26f3c741d085..9245add23b5d 100644 --- a/drivers/gpu/host1x/intr.c +++ b/drivers/gpu/host1x/intr.c @@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter) /* Add nr_completed to trace */ trace_host1x_channel_submit_complete(dev_name(channel->dev), waiter->count, waiter->thresh); - } static void action_wakeup(struct host1x_waitlist *waiter) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index fb36d371e48e..60b2fedd0061 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather); static unsigned int pin_job(struct host1x *host, struct host1x_job *job) { + struct host1x_client *client = job->client; + struct device *dev = client->dev; unsigned int i; int err; @@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) for (i = 0; i < job->num_relocs; i++) { struct host1x_reloc *reloc = &job->relocs[i]; + dma_addr_t phys_addr, *phys; struct sg_table *sgt; - dma_addr_t phys_addr; reloc->target.bo = host1x_bo_get(reloc->target.bo); if (!reloc->target.bo) { @@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - phys_addr = host1x_bo_pin(reloc->target.bo, &sgt); + if (client->group) + phys = &phys_addr; + else + phys = NULL; + + sgt = host1x_bo_pin(dev, reloc->target.bo, phys); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + + if (sgt) { + unsigned long mask = HOST1X_RELOC_READ | + HOST1X_RELOC_WRITE; + enum dma_data_direction dir; 
+ + switch (reloc->flags & mask) { + case HOST1X_RELOC_READ: + dir = DMA_TO_DEVICE; + break; + + case HOST1X_RELOC_WRITE: + dir = DMA_FROM_DEVICE; + break; + + case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE: + dir = DMA_BIDIRECTIONAL; + break; + + default: + err = -EINVAL; + goto unpin; + } + + err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); + if (!err) { + err = -ENOMEM; + goto unpin; + } + + job->unpins[job->num_unpins].dev = dev; + job->unpins[job->num_unpins].dir = dir; + phys_addr = sg_dma_address(sgt->sgl); + } job->addr_phys[job->num_unpins] = phys_addr; job->unpins[job->num_unpins].bo = reloc->target.bo; @@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - phys_addr = host1x_bo_pin(g->bo, &sgt); + sgt = host1x_bo_pin(host->dev, g->bo, NULL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) { for_each_sg(sgt->sgl, sg, sgt->nents, j) @@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - job->addr_phys[job->num_unpins] = - iova_dma_addr(&host->iova, alloc); job->unpins[job->num_unpins].size = gather_size; + phys_addr = iova_dma_addr(&host->iova, alloc); } else { - job->addr_phys[job->num_unpins] = phys_addr; + err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + if (!err) { + err = -ENOMEM; + goto unpin; + } + + job->unpins[job->num_unpins].dev = host->dev; + phys_addr = sg_dma_address(sgt->sgl); } - job->gather_addr_phys[i] = job->addr_phys[job->num_unpins]; + job->addr_phys[job->num_unpins] = phys_addr; + job->gather_addr_phys[i] = phys_addr; + job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].bo = g->bo; job->unpins[job->num_unpins].sgt = sgt; job->num_unpins++; @@ -429,7 +487,8 @@ out: return err; } -static inline int copy_gathers(struct host1x_job *job, struct device *dev) +static inline int copy_gathers(struct device *host, struct host1x_job *job, + struct device *dev) { struct host1x_firewall fw; size_t size = 0; @@ -452,12 +511,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) * Try a non-blocking allocation from the higher-priority pool first, * since waiting for the allocation here is a major performance hit.
*/ - job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_NOWAIT); /* the higher-priority allocation failed, try the generic blocking one */ if (!job->gather_copy_mapped) - job->gather_copy_mapped = dma_alloc_wc(dev, size, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_KERNEL); if (!job->gather_copy_mapped) @@ -505,7 +564,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev) goto out; if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) { - err = copy_gathers(job, dev); + err = copy_gathers(host->dev, job, dev); if (err) goto out; } @@ -550,6 +609,8 @@ void host1x_job_unpin(struct host1x_job *job) for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; + struct device *dev = unpin->dev ?: host->dev; + struct sg_table *sgt = unpin->sgt; if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && unpin->size && host->domain) { @@ -559,14 +620,18 @@ void host1x_job_unpin(struct host1x_job *job) iova_pfn(&host->iova, job->addr_phys[i])); } - host1x_bo_unpin(unpin->bo, unpin->sgt); + if (unpin->dev && sgt) + dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents, + unpin->dir); + + host1x_bo_unpin(dev, unpin->bo, sgt); host1x_bo_put(unpin->bo); } job->num_unpins = 0; if (job->gather_copy_size) - dma_free_wc(job->channel->dev, job->gather_copy_size, + dma_free_wc(host->dev, job->gather_copy_size, job->gather_copy_mapped, job->gather_copy); } EXPORT_SYMBOL(host1x_job_unpin); diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h index 62b8805e6b35..94bc2e4ae241 100644 --- a/drivers/gpu/host1x/job.h +++ b/drivers/gpu/host1x/job.h @@ -8,6 +8,8 @@ #ifndef __HOST1X_JOB_H #define __HOST1X_JOB_H +#include <linux/dma-direction.h> + struct host1x_job_gather { unsigned int words; dma_addr_t base; @@ -19,7 +21,9 @@ struct host1x_job_gather { struct host1x_job_unpin_data { struct host1x_bo *bo; struct sg_table *sgt; + struct device *dev; size_t size; + enum dma_data_direction dir; }; /*
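One calling convention worth underlining in the pin/unpin rework above: dma_map_sg() returns the number of mapped entries, or 0 on failure, never a negative errno, which is why each call site converts a zero return into -ENOMEM. A minimal sketch of the map, use and unmap round trip under that convention:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_map_for_device(struct device *dev, struct sg_table *sgt)
{
	int nents;

	/* Returns the number of mapped entries; 0 signals failure. */
	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* The device-visible address of the first segment. */
	(void)sg_dma_address(sgt->sgl);

	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return 0;
}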